InternalCallVerifierEqualityVerifier
@Test public void testInvalidIdentifier() throws Exception {
  // Refreshing an identifier nobody registered a handler for must fail.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String[] cliArgs = {"-refresh",
      "localhost:" + cluster.getNameNodePort(), "unregisteredIdentity"};
  int rc = dfsAdmin.run(cliArgs);
  assertEquals("DFSAdmin should fail due to no handler registered", -1, rc);
}
InternalCallVerifierEqualityVerifier
@Test public void testInvalidCommand() throws Exception {
  // "-refresh" without a host:port and identifier is malformed usage.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int rc = dfsAdmin.run(new String[]{"-refresh", "nn"});
  assertEquals("DFSAdmin should fail due to bad args", -1, rc);
}
InternalCallVerifierEqualityVerifier
@Test public void testVariableArgs() throws Exception {
  // Tokens after the identifier are forwarded verbatim to the handler,
  // whose return value becomes DFSAdmin's exit code.
  final String address = "localhost:" + cluster.getNameNodePort();
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int rc = dfsAdmin.run(
      new String[]{"-refresh", address, "secondHandler", "one"});
  assertEquals("DFSAdmin should return 2", 2, rc);
  rc = dfsAdmin.run(
      new String[]{"-refresh", address, "secondHandler", "one", "two"});
  assertEquals("DFSAdmin should now return 3", 3, rc);
  // The handler must have seen exactly the trailing arguments each time.
  Mockito.verify(secondHandler).handleRefresh("secondHandler",
      new String[]{"one"});
  Mockito.verify(secondHandler).handleRefresh("secondHandler",
      new String[]{"one", "two"});
}
InternalCallVerifierEqualityVerifier
@Test public void testValidIdentifier() throws Exception {
  // A registered identifier dispatches to its handler and exits 0.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int rc = dfsAdmin.run(new String[]{"-refresh",
      "localhost:" + cluster.getNameNodePort(), "firstHandler"});
  assertEquals("DFSAdmin should succeed", 0, rc);
  // Only the addressed handler is invoked, and with no extra arguments.
  Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
  Mockito.verify(secondHandler, Mockito.never()).handleRefresh(
      Mockito.anyString(), Mockito.any(String[].class));
}
Class: org.apache.hadoop.TestRefreshCallQueue
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testRefresh() throws Exception {
  // Before the refresh, puts must be served by the mock queue.
  assertTrue("Mock queue should have been constructed",
      mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
  int constructionsBeforeRefresh = mockQueueConstructions;
  // Trigger the call-queue refresh through the admin CLI.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int rc = dfsAdmin.run(new String[]{"-refreshCallQueue"});
  assertEquals("DFSAdmin should return 0", 0, rc);
  // The refresh must not have built another MockQueue...
  assertEquals("Mock queue should have no additional constructions",
      constructionsBeforeRefresh, mockQueueConstructions);
  // ...and puts no longer reach the mock queue, though puts still work.
  try {
    assertFalse("Puts are routed through LBQ instead of MockQueue",
        canPutInMockQueue());
  } catch (IOException ioe) {
    fail("Could not put into queue at all");
  }
}
Class: org.apache.hadoop.conf.TestConfServlet
UtilityVerifierEqualityVerifierHybridVerifier
@Test public void testBadFormat() throws Exception {
  // An unknown format string must be rejected before anything is written.
  StringWriter writer = new StringWriter();
  try {
    ConfServlet.writeResponse(getTestConf(), writer, "not a format");
    fail("writeResponse with bad format didn't throw!");
  } catch (ConfServlet.BadFormatException expected) {
    // expected: unknown formats are rejected
  }
  // Nothing may have been emitted prior to the rejection.
  assertEquals("", writer.toString());
}
BranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys(){
  // When a deprecated key maps to several new keys, setting any one of
  // them must be visible through all of them, and the iterator must
  // surface every key with the latest value.
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
  conf.set("k", "v");
  // Setting the deprecated key propagates to both new keys.
  conf.set("dK", "V");
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK1"));
  assertEquals("V", conf.get("nK2"));
  // Setting one new key is visible through the deprecated and sibling keys.
  conf.set("nK1", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK1"));
  assertEquals("VV", conf.get("nK2"));
  conf.set("nK2", "VVV");
  assertEquals("VVV", conf.get("dK"));
  assertEquals("VVV", conf.get("nK2"));
  assertEquals("VVV", conf.get("nK1"));
  boolean kFound = false;
  boolean dKFound = false;
  boolean nK1Found = false;
  boolean nK2Found = false;
  // Fix: iterate with the parameterized entry type instead of a raw
  // Map.Entry, avoiding raw-type use and Object-typed values.
  for (Map.Entry<String, String> entry : conf) {
    if (entry.getKey().equals("k")) {
      assertEquals("v", entry.getValue());
      kFound = true;
    }
    if (entry.getKey().equals("dK")) {
      assertEquals("VVV", entry.getValue());
      dKFound = true;
    }
    if (entry.getKey().equals("nK1")) {
      assertEquals("VVV", entry.getValue());
      nK1Found = true;
    }
    if (entry.getKey().equals("nK2")) {
      assertEquals("VVV", entry.getValue());
      nK2Found = true;
    }
  }
  assertTrue("regular Key not found", kFound);
  assertTrue("deprecated Key not found", dKFound);
  assertTrue("new Key 1 not found", nK1Found);
  assertTrue("new Key 2 not found", nK2Found);
}
Class: org.apache.hadoop.conf.TestJobConf
InternalCallVerifierEqualityVerifier
/**
 * Test that negative values for new configuration keys get passed through.
 */
@Test public void testNegativeValuesForMemoryParams(){
  // Negative settings on the new memory keys are returned verbatim by
  // the getters — they are not sanitized or replaced with defaults.
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MAP_MEMORY_MB, "-5");
  jobConf.set(MRJobConfig.REDUCE_MEMORY_MB, "-6");
  Assert.assertEquals(-5, jobConf.getMemoryForMapTask());
  Assert.assertEquals(-6, jobConf.getMemoryForReduceTask());
}
InternalCallVerifierEqualityVerifier
/**
 * Test that negative values for MAPRED_TASK_MAXVMEM_PROPERTY cause
 * new configuration keys' values to be used.
 */
@Test public void testNegativeValueForTaskVmem(){
  JobConf jobConf = new JobConf();
  // A negative legacy maxvmem value falls back to the defaults...
  jobConf.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3");
  Assert.assertEquals(MRJobConfig.DEFAULT_MAP_MEMORY_MB,
      jobConf.getMemoryForMapTask());
  Assert.assertEquals(MRJobConfig.DEFAULT_REDUCE_MEMORY_MB,
      jobConf.getMemoryForReduceTask());
  // ...but explicit settings of the new keys win over those defaults.
  jobConf.set(MRJobConfig.MAP_MEMORY_MB, "4");
  jobConf.set(MRJobConfig.REDUCE_MEMORY_MB, "5");
  Assert.assertEquals(4, jobConf.getMemoryForMapTask());
  Assert.assertEquals(5, jobConf.getMemoryForReduceTask());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK fail is not
 * available on the standby.
 */
@Test public void testFailoverWithFailingBKCluster() throws Exception {
// Grow the ensemble by one bookie so that losing that single bookie
// leaves too few bookies for the configured ensemble.
int ensembleSize=numBookies + 1;
BookieServer newBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
BookieServer replacementBookie=null;
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
// Ensemble == quorum == all bookies, so one bookie failure stalls writes.
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false): the NN is expected to abort when the shared
// journal fails, and the test must survive that abort.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
// NOTE(review): nn1/nn2 are never used below — candidates for removal.
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
Path p1=new Path("/testBKJMFailingBKCluster1");
Path p2=new Path("/testBKJMFailingBKCluster2");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// p1 is created while BK is healthy, so it must survive the failover.
fs.mkdirs(p1);
// Kill the extra bookie: the shared journal can no longer form an ensemble.
newBookie.shutdown();
assertEquals("New bookie didn't stop",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
fs.mkdirs(p2);
fail("mkdirs should result in the NN exiting");
}
catch ( RemoteException re) {
// The active NN aborts when the required journal cannot be written.
assertTrue(re.getClassName().contains("ExitException"));
}
cluster.shutdownNameNode(0);
// The standby cannot become active while the journal is unavailable.
try {
cluster.transitionToActive(1);
fail("Shouldn't have been able to transition with bookies down");
}
catch ( ExitException ee) {
assertTrue("Should shutdown due to required journal failure",ee.getMessage().contains("starting log segment 3 failed for required journal"));
}
// Restore BK capacity; the failover should now succeed.
replacementBookie=bkutil.newBookie();
assertEquals("Replacement bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
cluster.transitionToActive(1);
// p1 (written before the failure) is visible; p2 (attempted during) is not.
assertTrue(fs.exists(p1));
assertFalse(fs.exists(p2));
}
finally {
// Best-effort cleanup of bookies and the mini cluster.
newBookie.shutdown();
if (replacementBookie != null) {
replacementBookie.shutdown();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that if enough bookies fail to prevent an ensemble,
 * writes the bookkeeper will fail. Test that when once again
 * an ensemble is available, it can continue to write.
 */
@Test public void testAllBookieFailure() throws Exception {
// Add one bookie so the configured ensemble needs every bookie alive.
BookieServer bookieToFail=bkutil.newBookie();
BookieServer replacementBookie=null;
try {
int ensembleSize=numBookies + 1;
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
Configuration conf=new Configuration();
// Ensemble == quorum == all bookies: a single failure blocks writes.
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
long txid=1;
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),nsi);
bkjm.format(nsi);
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Writes succeed while the full ensemble is up.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
// Take a bookie down; the ensemble can no longer be satisfied.
bookieToFail.shutdown();
assertEquals("New bookie didn't die",numBookies,bkutil.checkBookiesUp(numBookies,10));
// Further writes on the open segment must now fail on flush.
try {
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
fail("should not get to this stage");
}
catch ( IOException ioe) {
LOG.debug("Error writing to bookkeeper",ioe);
assertTrue("Invalid exception message",ioe.getMessage().contains("Failed to write to bookkeeper"));
}
// Bring capacity back; after recovery a new segment is writable again.
replacementBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",numBookies + 1,bkutil.checkBookiesUp(numBookies + 1,10));
bkjm.recoverUnfinalizedSegments();
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
}
catch ( Exception e) {
// Log before rethrowing so the failure is visible in the test output.
LOG.error("Exception in test",e);
throw e;
}
finally {
// Best-effort cleanup; warn if bookies linger and may affect later tests.
if (replacementBookie != null) {
replacementBookie.shutdown();
}
bookieToFail.shutdown();
if (bkutil.checkBookiesUp(numBookies,30) != numBookies) {
LOG.warn("Not all bookies from this test shut down, expect errors");
}
}
}
IterativeVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
  // Three finalized segments followed by a half-full, aborted in-progress
  // segment: getNumberOfTransactions(1, inProgressOk=true) counts them all.
  NamespaceInfo nsInfo = newNSInfo();
  BookKeeperJournalManager journal = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsInfo);
  journal.format(nsInfo);
  long nextTxId = 1;
  for (int segment = 0; segment < 3; segment++) {
    long firstTxId = nextTxId;
    EditLogOutputStream stream = journal.startLogSegment(firstTxId,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long written = 0; written < DEFAULT_SEGMENT_SIZE; written++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(nextTxId++);
      stream.write(op);
    }
    stream.close();
    journal.finalizeLogSegment(firstTxId, nextTxId - 1);
    // The finalized segment's znode must exist after finalization.
    assertNotNull(zkc.exists(
        journal.finalizedLedgerZNode(firstTxId, nextTxId - 1), false));
  }
  // Leave a half-written segment in progress (aborted, never finalized).
  long firstTxId = nextTxId;
  EditLogOutputStream stream = journal.startLogSegment(firstTxId,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long written = 0; written < DEFAULT_SEGMENT_SIZE / 2; written++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(nextTxId++);
    stream.write(op);
  }
  stream.setReadyToFlush();
  stream.flush();
  stream.abort();
  stream.close();
  // Every transaction written so far must be counted, in-progress included.
  long numTrans = journal.getNumberOfTransactions(1, true);
  assertEquals((nextTxId - 1), numTrans);
}
InternalCallVerifierEqualityVerifier
@Test public void testNumberOfTransactions() throws Exception {
  // Write and finalize a single 100-transaction segment, then count it back.
  NamespaceInfo nsInfo = newNSInfo();
  BookKeeperJournalManager journal = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsInfo);
  journal.format(nsInfo);
  EditLogOutputStream stream = journal.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long txId = 1; txId <= 100; txId++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txId);
    stream.write(op);
  }
  stream.close();
  journal.finalizeLogSegment(1, 100);
  long numTrans = journal.getNumberOfTransactions(1, true);
  assertEquals(100, numTrans);
}
IterativeVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testNumberOfTransactionsWithGaps() throws Exception {
  // Write three finalized segments, delete the middle one's znode, and
  // verify that counting stops at the gap and resumes after it.
  NamespaceInfo nsInfo = newNSInfo();
  BookKeeperJournalManager journal = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsInfo);
  journal.format(nsInfo);
  long nextTxId = 1;
  for (int segment = 0; segment < 3; segment++) {
    long firstTxId = nextTxId;
    EditLogOutputStream stream = journal.startLogSegment(firstTxId,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long written = 0; written < DEFAULT_SEGMENT_SIZE; written++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(nextTxId++);
      stream.write(op);
    }
    stream.close();
    journal.finalizeLogSegment(firstTxId, nextTxId - 1);
    assertNotNull(zkc.exists(
        journal.finalizedLedgerZNode(firstTxId, nextTxId - 1), false));
  }
  // Remove the middle segment's metadata to create the gap.
  zkc.delete(journal.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE + 1,
      DEFAULT_SEGMENT_SIZE * 2), -1);
  // Counting from txid 1 only reaches the end of the first segment.
  long numTrans = journal.getNumberOfTransactions(1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
  // Starting inside the gap is reported as corruption.
  try {
    numTrans = journal.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE + 1, true);
    fail("Should have thrown corruption exception by this point");
  } catch (JournalManager.CorruptionException expected) {
    // expected: the requested range begins in the missing segment
  }
  // Counting resumes normally after the gap.
  numTrans = journal.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE * 2) + 1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Tests that the edit log file meta data reading from ZooKeeper should be
 * able to handle the NoNodeException. bkjm.getInputStream(fromTxId,
 * inProgressOk) should suppress the NoNodeException and continue. HDFS-3441.
 */
@Test public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-editlogfile");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.format(nsi);
  try {
    // Two finalized segments; the second one's metadata read is made to
    // throw NoNodeException via a spied ZooKeeper handle.
    String zkpath1 = startAndFinalizeLogSegment(bkjm, 1, 50);
    String zkpath2 = startAndFinalizeLogSegment(bkjm, 51, 100);
    ZooKeeper zkspy = spy(BKJMUtil.connectZooKeeper());
    bkjm.setZooKeeper(zkspy);
    Mockito.doThrow(
        new KeeperException.NoNodeException(zkpath2 + " doesn't exists"))
        .when(zkspy).getData(zkpath2, false, null);
    // Fix: parameterize the raw List so that get(0).getZkPath() resolves
    // without an unchecked cast (raw List.get returns Object).
    List<EditLogLedgerMetadata> ledgerList = bkjm.getLedgerList(false);
    // The missing node is skipped, leaving only the first segment's entry.
    assertEquals("List contains the metadata of non exists path.", 1,
        ledgerList.size());
    assertEquals("LogLedgerMetadata contains wrong zk paths.", zkpath1,
        ledgerList.get(0).getZkPath());
  } finally {
    bkjm.close();
  }
}
EqualityVerifier
@Test public void testSimpleRead() throws Exception {
  // Write 10000 transactions into one segment, finalize it, then read the
  // same number back through selectInputStreams.
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simpleread"), nsi);
  bkjm.format(nsi);
  final long numTransactions = 10000;
  // Fix: removed a stray empty statement after this call.
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= numTransactions; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, numTransactions);
  // Fix: parameterize the raw List/ArrayList so the element type supports
  // countTransactionsInStream(...) and close() without casts.
  List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
  bkjm.selectInputStreams(in, 1, true);
  try {
    assertEquals(numTransactions,
        FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
  } finally {
    in.get(0).close();
  }
}
InternalCallVerifierEqualityVerifier
/**
 * Test that a BookKeeper JM can continue to work across the
 * failure of a bookie. This should be handled transparently
 * by bookkeeper.
 */
@Test public void testOneBookieFailure() throws Exception {
// Start one extra bookie; it will be the one that fails mid-segment.
BookieServer bookieToFail=bkutil.newBookie();
BookieServer replacementBookie=null;
try {
int ensembleSize=numBookies + 1;
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
Configuration conf=new Configuration();
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
long txid=1;
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"),nsi);
bkjm.format(nsi);
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// A few writes while the original ensemble is intact.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
// Start the replacement BEFORE killing a bookie, so enough bookies are
// always available and the failure can be absorbed transparently.
replacementBookie=bkutil.newBookie();
assertEquals("replacement bookie didn't start",ensembleSize + 1,bkutil.checkBookiesUp(ensembleSize + 1,10));
bookieToFail.shutdown();
assertEquals("New bookie didn't die",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
// Writes on the same open stream must still succeed after the failure.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
}
catch ( Exception e) {
// Log before rethrowing so the failure is visible in the test output.
LOG.error("Exception in test",e);
throw e;
}
finally {
// Best-effort cleanup; warn if bookies linger and may affect later tests.
if (replacementBookie != null) {
replacementBookie.shutdown();
}
bookieToFail.shutdown();
if (bkutil.checkBookiesUp(numBookies,30) != numBookies) {
LOG.warn("Not all bookies from this test shut down, expect errors");
}
}
}
InternalCallVerifierEqualityVerifier
/**
 * Tests that read should be able to read the data which updated with update
 * api
 */
@Test public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
  final String data = "inprogressNode";
  CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  ci.init();
  ci.update(data);
  String inprogressNodePath = ci.read();
  // Fix: compare against the variable that was written rather than a
  // duplicated literal, so a future change to `data` keeps the test honest.
  assertEquals("Not returning inprogressZnode", data, inprogressNodePath);
}
InternalCallVerifierEqualityVerifier
/**
 * Tests that read should return null if we clear the updated data in
 * CurrentInprogress node
 */
@Test public void testReadShouldReturnNullAfterClear() throws Exception {
  CurrentInprogress inprogress = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  inprogress.init();
  inprogress.update("myInprogressZnode");
  inprogress.read();
  inprogress.clear();
  // After clear(), there is nothing left to read back.
  assertEquals("Expecting null to be return", null, inprogress.read());
}
InternalCallVerifierEqualityVerifierExceptionVerifierHybridVerifier
/**
 * Tests that update should throw IOE, if version number modifies between read
 * and update
 */
@Test(expected=IOException.class) public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead() throws Exception {
  CurrentInprogress inprogress = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  inprogress.init();
  inprogress.update("myInprogressZnode");
  assertEquals("Not returning myInprogressZnode", "myInprogressZnode",
      inprogress.read());
  // An intervening update changes the znode version, so the next update —
  // which the test expects to see a stale version — must throw IOException.
  inprogress.update("YourInprogressZnode");
  inprogress.update("myInprogressZnode");
}
BranchVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
@Test public void testGenerateEncryptedKey() throws Exception {
  // Generate an EEK and sanity-check its metadata.
  KeyProviderCryptoExtension.EncryptedKeyVersion ek1 =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  assertEquals("Version name of EEK should be EEK",
      KeyProviderCryptoExtension.EEK,
      ek1.getEncryptedKeyVersion().getVersionName());
  assertEquals("Name of EEK should be encryption key name",
      ENCRYPTION_KEY_NAME, ek1.getEncryptionKeyName());
  assertNotNull("Expected encrypted key material",
      ek1.getEncryptedKeyVersion().getMaterial());
  assertEquals("Length of encryption key material and EEK material should "
      + "be the same", encryptionKey.getMaterial().length,
      ek1.getEncryptedKeyVersion().getMaterial().length);
  // Decrypt it back and check the plaintext's metadata and length.
  KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
  assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
  assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
  // Neither the decrypted nor the encrypted material may equal the
  // encryption key's own material.
  if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
    fail("Encrypted key material should not equal encryption key material");
  }
  if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
      encryptionKey.getMaterial())) {
    fail("Encrypted key material should not equal decrypted key material");
  }
  // Decrypting the same EEK twice must be deterministic.
  KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
  assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
  // A second EEK for the same key must differ in both material and IV.
  KeyProviderCryptoExtension.EncryptedKeyVersion ek2 =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
  if (Arrays.equals(k1.getMaterial(), k2.getMaterial())) {
    fail("Generated EEKs should have different material!");
  }
  if (Arrays.equals(ek1.getEncryptedKeyIv(), ek2.getEncryptedKeyIv())) {
    fail("Generated EEKs should have different IVs!");
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testEncryptDecrypt() throws Exception {
  // Manually decrypt an EEK with JCE and check that the extension's
  // decryptEncryptedKey produces identical key material.
  KeyProviderCryptoExtension.EncryptedKeyVersion eek =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  final byte[] iv = eek.getEncryptedKeyIv();
  final byte[] encryptedMaterial = eek.getEncryptedKeyVersion().getMaterial();
  // Manual path: AES/CTR with the IV derived the same way the provider does.
  Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
  cipher.init(Cipher.DECRYPT_MODE,
      new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
      new IvParameterSpec(
          KeyProviderCryptoExtension.EncryptedKeyVersion.deriveIV(iv)));
  final byte[] manualMaterial = cipher.doFinal(encryptedMaterial);
  // API path: rebuild the EEK for decryption and let the extension decrypt.
  EncryptedKeyVersion eek2 = EncryptedKeyVersion.createForDecryption(
      eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
      eek.getEncryptedKeyVersion().getMaterial());
  KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek2);
  final byte[] apiMaterial = decryptedKey.getMaterial();
  assertArrayEquals("Wrong key material from decryptEncryptedKey",
      manualMaterial, apiMaterial);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testInvalidProvider() throws Exception {
  // An unknown provider scheme leaves no usable KeyProvider: exit code 1
  // plus an explanatory message.
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(new String[]{"create", "key1", "-cipher", "AES",
      "-provider", "sdff://file/tmp/keystore.jceks"});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("There are no valid "
      + "KeyProviders configured."));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testInvalidKeySize() throws Exception {
  // A key size of 56 is rejected, so the key must not be created.
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(new String[]{"create", "key1", "-size", "56",
      "-provider", jceksProvider});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testTransientProviderOnlyConfig() throws Exception {
  // With only the transient user:/// provider configured, key creation
  // fails with the "no valid KeyProviders" message.
  KeyShell shell = new KeyShell();
  Configuration shellConf = new Configuration();
  shellConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
  shell.setConf(shellConf);
  int exitCode = shell.run(new String[]{"create", "key1"});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("There are no valid "
      + "KeyProviders configured."));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testKeySuccessfulKeyLifecycle() throws Exception {
  // Full lifecycle: create -> list -> roll -> delete -> verify gone.
  String keyName = "key1";
  KeyShell ks = new KeyShell();
  ks.setConf(new Configuration());
  outContent.reset();
  final String[] args1 = {"create", keyName, "-provider", jceksProvider};
  int rc = ks.run(args1);
  assertEquals(0, rc);
  assertTrue(outContent.toString().contains(keyName + " has been "
      + "successfully created"));
  // The key shows up in both terse and verbose (metadata) listings.
  String listOut = listKeys(ks, false);
  assertTrue(listOut.contains(keyName));
  listOut = listKeys(ks, true);
  assertTrue(listOut.contains(keyName));
  assertTrue(listOut.contains("description"));
  assertTrue(listOut.contains("created"));
  outContent.reset();
  final String[] args2 = {"roll", keyName, "-provider", jceksProvider};
  rc = ks.run(args2);
  assertEquals(0, rc);
  // Fix: use keyName instead of a hard-coded "key1" so the assertion stays
  // consistent with the rest of the test if the name ever changes.
  assertTrue(outContent.toString().contains(keyName
      + " has been successfully rolled."));
  deleteKey(ks, keyName);
  // After deletion the key must no longer be listed.
  listOut = listKeys(ks, false);
  assertFalse(listOut, listOut.contains(keyName));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testAttributes() throws Exception {
// Exercises the "-attr name=value" option of the key-create command.
int rc;
KeyShell ks=new KeyShell();
ks.setConf(new Configuration());
// A well-formed attribute is stored and shown by the verbose listing.
final String[] args1={"create","keyattr1","-provider",jceksProvider,"-attr","foo=bar"};
rc=ks.run(args1);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("keyattr1 has been " + "successfully created"));
String listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr1"));
assertTrue(listOut.contains("attributes: [foo=bar]"));
outContent.reset();
// Malformed attributes are rejected (exit code 1): missing name...
final String[] args2={"create","keyattr2","-provider",jceksProvider,"-attr","=bar"};
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// ...no '=' at all (args2[5] is the -attr value slot, mutated in place)...
args2[5]="foo";
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// ...and a bare '=' with neither name nor value.
args2[5]="=";
rc=ks.run(args2);
assertEquals(1,rc);
outContent.reset();
// "a=b=c" is accepted; the listing shows the attribute as "a=b=c".
args2[5]="a=b=c";
rc=ks.run(args2);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr2"));
assertTrue(listOut.contains("attributes: [a=b=c]"));
outContent.reset();
// Multiple attributes are accepted; the listing shows whitespace around
// names/values stripped ("foo = bar" -> [foo=bar]).
final String[] args3={"create","keyattr3","-provider",jceksProvider,"-attr","foo = bar","-attr"," glarch =baz ","-attr","abc=def"};
rc=ks.run(args3);
assertEquals(0,rc);
listOut=listKeys(ks,true);
assertTrue(listOut.contains("keyattr3"));
assertTrue(listOut.contains("[foo=bar]"));
assertTrue(listOut.contains("[glarch=baz]"));
assertTrue(listOut.contains("[abc=def]"));
outContent.reset();
// Duplicate attribute names within one create must fail.
final String[] args4={"create","keyattr4","-provider",jceksProvider,"-attr","foo=bar","-attr","foo=glarch"};
rc=ks.run(args4);
assertEquals(1,rc);
// keyattr4 was never created (rc==1), so only the first three need cleanup.
deleteKey(ks,"keyattr1");
deleteKey(ks,"keyattr2");
deleteKey(ks,"keyattr3");
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testInvalidCipher() throws Exception {
  // "LJM" is not a recognized cipher, so the key must not be created.
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(new String[]{"create", "key1", "-cipher", "LJM",
      "-provider", jceksProvider});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testKeySuccessfulCreationWithDescription() throws Exception {
  // A key created with -description must surface that description in the
  // verbose listing.
  outContent.reset();
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(new String[]{"create", "key1", "-provider",
      jceksProvider, "-description", "someDescription"});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("key1 has been successfully "
      + "created"));
  String listing = listKeys(shell, true);
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("someDescription"));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testTransientProviderWarning() throws Exception {
  // Creating a key in the transient user:/// provider succeeds, but a
  // warning about the provider's transience must be printed.
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(new String[]{"create", "key1", "-cipher", "AES",
      "-provider", "user:///"});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("WARNING: you are modifying a "
      + "transient provider."));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testFullCipher() throws Exception {
  // A fully-qualified cipher spec (algorithm/mode/padding) is accepted.
  final String keyName = "key1";
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int exitCode = shell.run(new String[]{"create", keyName, "-cipher",
      "AES/CBC/pkcs5Padding", "-provider", jceksProvider});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(keyName + " has been "
      + "successfully created"));
  deleteKey(shell, keyName);
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test public void testUnsupportedSymlink() throws IOException {
  Path file = getTestRootPath(fc, "file");
  Path link = getTestRootPath(fc, "linkToFile");
  // Only meaningful on file systems WITHOUT symlink support; otherwise
  // there is nothing to verify.
  if (fc.getDefaultFileSystem().supportsSymlinks()) {
    return;
  }
  // Creating a symlink must be refused.
  try {
    fc.createSymlink(file, link, false);
    Assert.fail("Created a symlink on a file system that "
        + "does not support symlinks.");
  } catch (IOException expected) {
    // expected
  }
  createFile(file);
  // Asking for a link target on a plain file must be refused too.
  try {
    fc.getLinkTarget(file);
    Assert.fail("Got a link target on a file system that "
        + "does not support symlinks.");
  } catch (IOException expected) {
    // expected
  }
  // Without symlinks, link status and file status must coincide.
  Assert.assertEquals(fc.getFileStatus(file), fc.getFileLinkStatus(file));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testGetFileContext1() throws IOException {
  // A FileContext built from an AbstractFileSystem resolves paths
  // consistently with the context the AbstractFileSystem came from.
  final Path testRoot = getTestRootPath(fc, "test");
  final Path zooPath = new Path(testRoot, "zoo");
  AbstractFileSystem defaultFs = fc.getDefaultFileSystem();
  FileContext derived = FileContext.getFileContext(defaultFs);
  FSDataOutputStream stream = derived.create(zooPath, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  stream.close();
  Path resolved = derived.resolvePath(zooPath);
  assertEquals(resolved.toUri().getPath(), zooPath.toUri().getPath());
}
EqualityVerifier
@Test public void testToStringNoQuota(){
  // Built without quota data, toString(true) renders "none"/"inf"
  // placeholders in the quota columns.
  ContentSummary summary = new ContentSummary(11111, 22222, 33333);
  String expected = " none inf none" + " inf 33333 22222 11111 ";
  assertEquals(expected, summary.toString(true));
}
EqualityVerifier
@Test public void testToStringNoShowQuota(){
  // toString(false) omits the quota columns even when quota data exists.
  ContentSummary summary =
      new ContentSummary(11111, 22222, 33333, 44444, 55555, 66665);
  String expected = " 33333 22222 11111 ";
  assertEquals(expected, summary.toString(false));
}
InternalCallVerifierEqualityVerifier
@Test public void testReadFields() throws IOException {
  // Feed six longs through a mocked DataInput and verify the field order:
  // length, fileCount, directoryCount, quota, spaceConsumed, spaceQuota.
  final long length = 11111;
  final long fileCount = 22222;
  final long directoryCount = 33333;
  final long quota = 44444;
  final long spaceConsumed = 55555;
  final long spaceQuota = 66666;
  DataInput input = mock(DataInput.class);
  when(input.readLong()).thenReturn(length).thenReturn(fileCount)
      .thenReturn(directoryCount).thenReturn(quota)
      .thenReturn(spaceConsumed).thenReturn(spaceQuota);
  ContentSummary summary = new ContentSummary();
  summary.readFields(input);
  assertEquals("getLength", length, summary.getLength());
  assertEquals("getFileCount", fileCount, summary.getFileCount());
  assertEquals("getDirectoryCount", directoryCount, summary.getDirectoryCount());
  assertEquals("getQuota", quota, summary.getQuota());
  assertEquals("getSpaceConsumed", spaceConsumed, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", spaceQuota, summary.getSpaceQuota());
}
EqualityVerifier
@Test public void testToStringHumanNoShowQuota(){
  // Human-readable rendering without quota columns: big numbers get
  // K/M/E suffixes.
  ContentSummary summary = new ContentSummary(Long.MAX_VALUE, 222222222,
      33333, 222256578, 55555, Long.MAX_VALUE);
  String expected = " 32.6 K 211.9 M 8.0 E ";
  assertEquals(expected, summary.toString(false, true));
}
InternalCallVerifierEqualityVerifier
@Test public void testConstructorNoQuota(){
  // Without quota args: both quotas default to -1 and spaceConsumed
  // defaults to the length.
  final long length = 11111;
  final long fileCount = 22222;
  final long directoryCount = 33333;
  ContentSummary summary = new ContentSummary(length, fileCount, directoryCount);
  assertEquals("getLength", length, summary.getLength());
  assertEquals("getFileCount", fileCount, summary.getFileCount());
  assertEquals("getDirectoryCount", directoryCount, summary.getDirectoryCount());
  assertEquals("getQuota", -1, summary.getQuota());
  assertEquals("getSpaceConsumed", length, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", -1, summary.getSpaceQuota());
}
EqualityVerifier
@Test public void testGetHeaderWithQuota(){
  // The quota-aware header lists quota columns before dirs/files/bytes.
  String expected = " name quota rem name quota space quota "
      + "rem space quota directories files bytes ";
  assertEquals(expected, ContentSummary.getHeader(true));
}
InternalCallVerifierEqualityVerifier
@Test public void testConstructorWithQuota(){
  // The six-argument constructor must store every field verbatim.
  final long length = 11111;
  final long fileCount = 22222;
  final long directoryCount = 33333;
  final long quota = 44444;
  final long spaceConsumed = 55555;
  final long spaceQuota = 66666;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  assertEquals("getLength", length, summary.getLength());
  assertEquals("getFileCount", fileCount, summary.getFileCount());
  assertEquals("getDirectoryCount", directoryCount, summary.getDirectoryCount());
  assertEquals("getQuota", quota, summary.getQuota());
  assertEquals("getSpaceConsumed", spaceConsumed, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", spaceQuota, summary.getSpaceQuota());
}
EqualityVerifier
/**
 * toString() with no arguments must render the quota-format row:
 * quota, remaining quota, space quota, remaining space quota,
 * directory count, file count and length, in fixed-width columns.
 */
@Test public void testToString(){
  final ContentSummary summary=new ContentSummary(11111,22222,33333,44444,55555,66665);
  // remaining quota = 44444 - (22222 + 33333) = -11111;
  // remaining space quota = 66665 - 55555 = 11110
  final String expected=" 44444 -11111 66665" + " 11110 33333 22222 11111 ";
  assertEquals(expected,summary.toString());
}
EqualityVerifier
/**
 * Human-readable toString with quota: values are abbreviated with
 * unit suffixes (K, M, G, E) rather than printed as raw longs.
 */
@Test public void testToStringHumanWithQuota(){
  final long len=Long.MAX_VALUE;
  final long files=222222222;
  final long dirs=33333;
  final long nsQuota=222256578;
  final long consumed=1073741825;
  final long dsQuota=1;
  final ContentSummary summary=new ContentSummary(len,files,dirs,nsQuota,consumed,dsQuota);
  final String expected=" 212.0 M 1023 1 " + " -1 G 32.6 K 211.9 M 8.0 E ";
  assertEquals(expected,summary.toString(true,true));
}
EqualityVerifier
/**
 * toString(true) must produce the same quota-format row as the
 * no-argument overload, with raw (non-humanized) numbers.
 */
@Test public void testToStringWithQuota(){
  final ContentSummary summary=new ContentSummary(11111,22222,33333,44444,55555,66665);
  final String expected=" 44444 -11111 66665 11110" + " 33333 22222 11111 ";
  assertEquals(expected,summary.toString(true));
}
InternalCallVerifierEqualityVerifier
/**
 * Check that the write and readFields methods work correctly: statuses
 * serialized to a byte stream must deserialize equal to the originals.
 */
@Test public void testFileStatusWritable() throws Exception {
  FileStatus[] tests={new FileStatus(1,false,5,3,4,5,null,"","",new Path("/a/b")),new FileStatus(0,false,1,2,3,new Path("/")),new FileStatus(1,false,5,3,4,5,null,"","",new Path("/a/b"))};
  LOG.info("Writing FileStatuses to a ByteArrayOutputStream");
  ByteArrayOutputStream baos=new ByteArrayOutputStream();
  DataOutput out=new DataOutputStream(baos);
  for ( FileStatus fs : tests) {
    fs.write(out);
  }
  LOG.info("Creating ByteArrayInputStream object");
  DataInput in=new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
  LOG.info("Testing if read objects are equal to written ones");
  FileStatus dest=new FileStatus();
  int iterator=0;
  for ( FileStatus fs : tests) {
    dest.readFields(in);
    // JUnit argument order is (message, expected, actual): the status we
    // wrote is the expectation, the one read back is the actual value.
    // The original call had them swapped, producing a misleading
    // "expected/but was" message on failure.
    assertEquals("Different FileStatuses in iteration " + iterator,fs,dest);
    iterator++;
  }
}
EqualityVerifier
/**
 * Check that FileStatus equality is determined solely by path: two
 * statuses that differ in every other attribute but share a path must
 * compare equal.
 */
@Test public void testEquals(){
  final Path shared=new Path("path");
  final FileStatus first=new FileStatus(1,true,1,1,1,1,FsPermission.valueOf("-rw-rw-rw-"),"one","one",null,shared);
  final FileStatus second=new FileStatus(2,true,2,2,2,2,FsPermission.valueOf("---x--x--x"),"two","two",null,shared);
  assertEquals(first,second);
}
Class: org.apache.hadoop.fs.TestFileSystemCaching
APIUtilityVerifierUtilityVerifierInternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Exercises default-URI resolution against the FileSystem cache: every
 * scheme/authority variant that matches the configured default must
 * resolve to the same cached instance, a different authority must yield
 * a new instance, and an authority without a scheme must be rejected.
 */
@Test public void testDefaultFsUris() throws Exception {
final Configuration conf=new Configuration();
conf.set("fs.defaultfs.impl",DefaultFs.class.getName());
final URI defaultUri=URI.create("defaultfs://host");
FileSystem.setDefaultUri(conf,defaultUri);
FileSystem fs=null;
// resolving with no URI yields the configured default filesystem
final FileSystem defaultFs=FileSystem.get(conf);
assertEquals(defaultUri,defaultFs.getUri());
// same scheme, missing/empty authority: cache returns the same instance
fs=FileSystem.get(URI.create("defaultfs:/"),conf);
assertSame(defaultFs,fs);
fs=FileSystem.get(URI.create("defaultfs:///"),conf);
assertSame(defaultFs,fs);
// same scheme and same authority: still the cached default
fs=FileSystem.get(URI.create("defaultfs://host"),conf);
assertSame(defaultFs,fs);
// different authority: must produce a distinct instance
fs=FileSystem.get(URI.create("defaultfs://host2"),conf);
assertNotSame(defaultFs,fs);
// no scheme, no authority: falls back to the default filesystem
fs=FileSystem.get(URI.create("/"),conf);
assertSame(defaultFs,fs);
// an authority without a scheme cannot be resolved at all
try {
fs=FileSystem.get(URI.create("//host"),conf);
fail("got fs with auth but no scheme");
}
catch ( Exception e) {
assertEquals("No FileSystem for scheme: null",e.getMessage());
}
try {
fs=FileSystem.get(URI.create("//host2"),conf);
fail("got fs with auth but no scheme");
}
catch ( Exception e) {
assertEquals("No FileSystem for scheme: null",e.getMessage());
}
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * A filesystem that issues its own token and wraps two children must
 * fetch tokens for itself and for any child whose service is not yet in
 * the credential cache, while skipping a child whose token was
 * pre-seeded.
 */
@Test public void testFsWithMyOwnAndChildTokens() throws Exception {
  Credentials credentials=new Credentials();
  Text service1=new Text("singleTokenFs1");
  Text service2=new Text("singleTokenFs2");
  Text myService=new Text("multiTokenFs");
  // The original source had the malformed declaration "Token> token";
  // restore the wildcard generic so the line parses.
  Token<?> token=mock(Token.class);
  // Pre-seed fs2's token so the aggregate fs should NOT fetch it again.
  credentials.addToken(service2,token);
  MockFileSystem fs1=createFileSystemForServiceName(service1);
  MockFileSystem fs2=createFileSystemForServiceName(service2);
  MockFileSystem multiFs=createFileSystemForServiceName(myService,fs1,fs2);
  multiFs.addDelegationTokens(renewer,credentials);
  // the aggregate fs and fs1 fetch; fs2 is skipped (token already cached)
  verifyTokenFetch(multiFs,true);
  verifyTokenFetch(fs1,true);
  verifyTokenFetch(fs2,false);
  // all three services end up represented in the credentials
  assertEquals(3,credentials.numberOfTokens());
  assertNotNull(credentials.getToken(myService));
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
}
Class: org.apache.hadoop.fs.TestFileUtil
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * FileUtil.list(): returns the contents of an existing directory, an
 * empty array for an empty directory, and throws IOException for a
 * directory that does not exist.
 */
@Test(timeout=30000) public void testListAPI() throws IOException {
setupDirs();
String[] files=FileUtil.list(partitioned);
Assert.assertEquals("Unexpected number of pre-existing files",2,files.length);
File newDir=new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir",newDir.exists());
// a freshly created directory must list as empty, not null
files=FileUtil.list(newDir);
Assert.assertEquals("New directory unexpectedly contains files",0,files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir",newDir.exists());
try {
files=FileUtil.list(newDir);
Assert.fail("IOException expected on list() for non-existent dir " + newDir.toString());
}
catch ( IOException ioe) {
// expected: list() must reject a non-existent directory
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests if fullyDelete deletes
 * (a) symlink to file only and not the file pointed to by symlink.
 * (b) symlink to dir only and not the dir pointed to by symlink.
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteSymlinks() throws IOException {
setupDirs();
File link=new File(del,LINK);
Assert.assertEquals(5,del.list().length);
// (a) deleting the file symlink removes only the link itself
boolean ret=FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertFalse(link.exists());
Assert.assertEquals(4,del.list().length);
// the link's target directory contents must be untouched
validateTmpDir();
File linkDir=new File(del,"tmpDir");
// (b) deleting the directory symlink removes only the link itself
ret=FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertFalse(linkDir.exists());
Assert.assertEquals(3,del.list().length);
validateTmpDir();
}
BranchVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that length on a symlink works as expected.
 */
@Test(timeout=30000) public void testSymlinkLength() throws Exception {
Assert.assertFalse(del.exists());
del.mkdirs();
byte[] data="testSymLinkData".getBytes();
File file=new File(del,FILE);
File link=new File(del,"_link");
FileOutputStream os=new FileOutputStream(file);
os.write(data);
os.close();
// before the link exists the path reports zero length
Assert.assertEquals(0,link.length());
FileUtil.symLink(file.getAbsolutePath(),link.getAbsolutePath());
// once linked, the symlink reports the length of its target
Assert.assertEquals(data.length,file.length());
Assert.assertEquals(data.length,link.length());
file.delete();
Assert.assertFalse(file.exists());
if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
// pre-Java7 Windows: the dangling link still reports the old length
Assert.assertEquals(data.length,link.length());
}
else {
// everywhere else a dangling link reports zero length
Assert.assertEquals(0,link.length());
}
link.delete();
Assert.assertFalse(link.exists());
}
APIUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test that getDU is able to handle cycles caused due to symbolic links
 * and that directory sizes are not added to the final calculated size
 * @throws IOException
 */
@Test(timeout=30000) public void testGetDU() throws Exception {
setupDirs();
long du=FileUtil.getDU(TEST_DIR);
// two files of 3 bytes plus one platform line separator each
final long expected=2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected,du);
// a non-existent path contributes zero rather than failing
final File doesNotExist=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
long duDoesNotExist=FileUtil.getDU(doesNotExist);
assertEquals(0,duDoesNotExist);
// a plain file reports its own length
File notADirectory=new File(partitioned,"part-r-00000");
long duNotADirectoryActual=FileUtil.getDU(notADirectory);
long duNotADirectoryExpected=3 + System.getProperty("line.separator").length();
assertEquals(duNotADirectoryExpected,duNotADirectoryActual);
try {
try {
FileUtil.chmod(notADirectory.getAbsolutePath(),"0000");
}
catch ( InterruptedException ie) {
// chmod should complete without interruption
assertNull(ie);
}
// an unreadable file inside a readable dir still counts toward du
assertFalse(FileUtil.canRead(notADirectory));
final long du3=FileUtil.getDU(partitioned);
assertEquals(expected,du3);
try {
FileUtil.chmod(partitioned.getAbsolutePath(),"0000");
}
catch ( InterruptedException ie) {
assertNull(ie);
}
// an unreadable directory contributes zero
assertFalse(FileUtil.canRead(partitioned));
final long du4=FileUtil.getDU(partitioned);
assertEquals(0,du4);
}
finally {
// restore permissions so cleanup (and later tests) can proceed
FileUtil.chmod(partitioned.getAbsolutePath(),"0777",true);
}
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * FileUtil.listFiles(): mirrors testListAPI but for the File[] variant —
 * existing dir lists its files, empty dir yields an empty array, and a
 * non-existent dir throws IOException.
 */
@Test(timeout=30000) public void testListFiles() throws IOException {
setupDirs();
File[] files=FileUtil.listFiles(partitioned);
Assert.assertEquals(2,files.length);
File newDir=new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir",newDir.exists());
// an empty directory must yield an empty array, not null
files=FileUtil.listFiles(newDir);
Assert.assertEquals(0,files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir",newDir.exists());
try {
files=FileUtil.listFiles(newDir);
Assert.fail("IOException expected on listFiles() for non-existent dir " + newDir.toString());
}
catch ( IOException ioe) {
// expected: listFiles() must reject a non-existent directory
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * "rm -f" on a glob that matches nothing must succeed (exit code 0)
 * and print nothing to stderr.
 */
@Test(timeout=30000) public void testRmForceWithNonexistentGlob() throws Exception {
  Configuration conf=new Configuration();
  final FsShell shell=new FsShell();
  shell.setConf(conf);
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream stderrSpy=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  // redirect stderr so the shell's error output can be inspected
  System.setErr(stderrSpy);
  try {
    final int exitCode=shell.run(new String[]{"-rm","-f","nomatch*"});
    assertEquals(0,exitCode);
    // forced removal of a non-match is silent
    assertTrue(captured.toString().isEmpty());
  }
  finally {
    IOUtils.closeStream(stderrSpy);
    System.setErr(savedErr);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * "get" with a non-existent source must print a proper error message —
 * never the literal string "null" — and exit with code 1.
 */
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception {
Configuration conf=new Configuration();
FsShell shell=new FsShell();
shell.setConf(conf);
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
final PrintStream oldErr=System.err;
// capture stderr so the shell's error output can be inspected
System.setErr(out);
final String results;
try {
Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy");
fileSys.delete(tdir,true);
fileSys.mkdirs(tdir);
String[] args=new String[3];
args[0]="-get";
args[1]=new Path(tdir.toUri().getPath(),"/invalidSrc").toString();
args[2]=new Path(tdir.toUri().getPath(),"/invalidDst").toString();
// precondition: neither source nor destination exists
assertTrue("file exists",!fileSys.exists(new Path(args[1])));
assertTrue("file exists",!fileSys.exists(new Path(args[2])));
int run=shell.run(args);
results=bytes.toString();
assertEquals("Return code should be 1",1,run);
// the regression under test: a null source path printed "get: null"
assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null"));
assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory"));
}
finally {
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * "rm" (without -f) on a glob that matches nothing must fail with exit
 * code 1 and report "No such file or directory" on stderr.
 */
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception {
  Configuration conf=new Configuration();
  final FsShell shell=new FsShell();
  shell.setConf(conf);
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream stderrSpy=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  // redirect stderr so the shell's error output can be inspected
  System.setErr(stderrSpy);
  try {
    final int exitCode=shell.run(new String[]{"-rm","nomatch*"});
    assertEquals(1,exitCode);
    final String stderrText=captured.toString();
    assertTrue(stderrText.contains("rm: `nomatch*': No such file or directory"));
  }
  finally {
    IOUtils.closeStream(stderrSpy);
    System.setErr(savedErr);
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test Chmod 1. Create and write file on FS 2. Verify that exit code for
* chmod on existing file is 0 3. Verify that exit code for chmod on
* non-existing file is 1 4. Verify that exit code for chmod with glob input
* on non-existing file is 1 5. Verify that exit code for chmod with glob
* input on existing file in 0
* @throws Exception
*/
@Test(timeout=30000) public void testChmod() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChmod/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChmod/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChmod/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChmod/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChmod/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChmod/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChmod/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
String argv[]={"-chmod","777",f1};
assertEquals(0,fsShell.run(argv));
String argv2[]={"-chmod","777",f2};
assertEquals(1,fsShell.run(argv2));
String argv3[]={"-chmod","777",f3};
assertEquals(1,fsShell.run(argv3));
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
String argv4[]={"-chmod","777",f7};
assertEquals(0,fsShell.run(argv4));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * An interrupted shell command must stop processing further arguments
 * and exit with the conventional SIGINT code 130 (128 + signal 2).
 */
@Test(timeout=30000) public void testInterrupt() throws Exception {
MyFsShell shell=new MyFsShell();
shell.setConf(new Configuration());
final Path d=new Path(TEST_ROOT_DIR,"testInterrupt");
final Path f1=new Path(d,"f1");
final Path f2=new Path(d,"f2");
assertTrue(fileSys.mkdirs(d));
writeFile(fileSys,f1);
assertTrue(fileSys.isFile(f1));
writeFile(fileSys,f2);
assertTrue(fileSys.isFile(f2));
// the interrupt fires after the first path, so only one is processed
int exitCode=shell.run(new String[]{"-testInterrupt",f1.toString(),f2.toString()});
assertEquals(1,InterruptCommand.processed);
assertEquals(130,exitCode);
// a directory argument: interrupt fires after the second entry
exitCode=shell.run(new String[]{"-testInterrupt",d.toString()});
assertEquals(2,InterruptCommand.processed);
assertEquals(130,exitCode);
}
APIUtilityVerifierEqualityVerifier
/**
 * Test createHardLinkMult() with an empty list of files.
 * The extended variant of the call returns the number of System exec
 * calls it made, which must be zero when there is nothing to link.
 */
@Test public void testCreateHardLinkMultEmptyList() throws IOException {
  final String[] nothingToLink={};
  final int execCalls=createHardLinkMult(src,nothingToLink,tgt_mult,getMaxAllowedCmdArgLength());
  assertEquals(0,execCalls);
  // the source fixture must be left untouched
  validateSetup();
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the single-file method HardLink.createHardLink().
 * Also tests getLinkCount() with values greater than one.
 */
@Test public void testCreateHardLink() throws IOException {
createHardLink(x1,x1_one);
assertTrue(x1_one.exists());
// x1 and its link now share an inode: both report link count 2
assertEquals(2,getLinkCount(x1));
assertEquals(2,getLinkCount(x1_one));
// unrelated file is unaffected
assertEquals(1,getLinkCount(x2));
createHardLink(x2,y_one);
createHardLink(x3,x3_one);
assertEquals(2,getLinkCount(x2));
assertEquals(2,getLinkCount(x3));
// a second link to x1 bumps the count to 3 on all three names
createHardLink(x1,x11_one);
assertEquals(3,getLinkCount(x1));
assertEquals(3,getLinkCount(x1_one));
assertEquals(3,getLinkCount(x11_one));
validateTgtOne();
// writing through one name must be visible through every linked name
appendToFile(x1_one,str3);
assertTrue(fetchFileContents(x1_one).equals(str1 + str3));
assertTrue(fetchFileContents(x11_one).equals(str1 + str3));
assertTrue(fetchFileContents(x1).equals(str1 + str3));
}
EqualityVerifier
/**
 * Sanity check the simplest case of HardLink.getLinkCount():
 * ordinary singly-linked files must report a count of exactly one.
 * Tests with multiply-linked files are in later test cases.
 */
@Test public void testGetLinkCount() throws IOException {
  // each fixture file starts life with a single directory entry
  for (File singlyLinked : new File[]{x1,x2,x3}) {
    assertEquals(1,getLinkCount(singlyLinked));
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test createHardLinkMult(), again, this time with the "too long list"
 * case where the total size of the command line arguments exceed the
 * allowed maximum. In this case, the list should be automatically
 * broken up into chunks, each chunk no larger than the max allowed.
 * We use an extended version of the method call, specifying the
 * size limit explicitly, to simulate the "too long" list with a
 * relatively short list.
 */
@Test public void testCreateHardLinkMultOversizeAndEmpty() throws IOException {
// give the fixture files long names so a small limit forces chunking
String name1="x11111111";
String name2="x22222222";
String name3="x33333333";
File x1_long=new File(src,name1);
File x2_long=new File(src,name2);
File x3_long=new File(src,name3);
x1.renameTo(x1_long);
x2.renameTo(x2_long);
x3.renameTo(x3_long);
assertTrue(x1_long.exists());
assertTrue(x2_long.exists());
assertTrue(x3_long.exists());
assertFalse(x1.exists());
assertFalse(x2.exists());
assertFalse(x3.exists());
int callCount;
String[] emptyList={};
String[] fileNames=src.list();
// overhead = fixed part of the command line, before any file names
int overhead=getLinkMultArgLength(src,emptyList,tgt_mult);
// room for ~2.5 names per command: three files need 2 exec calls
int maxLength=overhead + (int)(2.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(2,callCount);
String[] tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
// reset the target dir for the second round
FileUtil.fullyDelete(tgt_mult);
assertFalse(tgt_mult.exists());
tgt_mult.mkdirs();
assertTrue(tgt_mult.exists() && tgt_mult.list().length == 0);
// room for less than one name: each file needs its own exec call
maxLength=overhead + (int)(0.5 * (float)(1 + name1.length()));
callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength);
assertEquals(3,callCount);
tgt_multNames=tgt_mult.list();
Arrays.sort(fileNames);
Arrays.sort(tgt_multNames);
assertArrayEquals(fileNames,tgt_multNames);
}
Class: org.apache.hadoop.fs.TestListFiles
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test when input path is a file: listFiles() on a single file must
 * yield exactly one LocatedFileStatus for that file, whether or not the
 * listing is recursive.
 */
@Test public void testFile() throws IOException {
  fs.mkdirs(TEST_DIR);
  writeFile(fs,FILE1,FILE_LEN);
  // the raw "RemoteIterator" type lost its generic parameter; restore it
  // so stat needs no unchecked conversion
  RemoteIterator<LocatedFileStatus> itor=fs.listFiles(FILE1,true);
  LocatedFileStatus stat=itor.next();
  // exactly one entry, describing the file itself
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN,stat.getLen());
  assertEquals(fs.makeQualified(FILE1),stat.getPath());
  assertEquals(1,stat.getBlockLocations().length);
  // non-recursive listing of a file behaves identically
  itor=fs.listFiles(FILE1,false);
  stat=itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN,stat.getLen());
  assertEquals(fs.makeQualified(FILE1),stat.getPath());
  assertEquals(1,stat.getBlockLocations().length);
  fs.delete(FILE1,true);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * getLocalPathForWrite without the context property configured must
 * fail with a descriptive IOException rather than an NPE.
 */
@Test(timeout=30000) public void testShouldNotthrowNPE() throws Exception {
Configuration conf1=new Configuration();
try {
dirAllocator.getLocalPathForWrite("/test",conf1);
fail("Exception not thrown when " + CONTEXT + " is not set");
}
catch ( IOException e) {
// expected: a clear "not configured" message
assertEquals(CONTEXT + " not configured",e.getMessage());
}
catch ( NullPointerException e) {
fail("Lack of configuration should not have thrown an NPE.");
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * getLocalPathForWrite with checkAccess set to false should create a parent
 * directory. With checkAccess true, the directory should not be created.
 * @throws Exception
 */
@Test(timeout=30000) public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    // default (checkAccess=true path creation) must create the parent dir
    Path p1=dirAllocator.getLocalPathForWrite("p1/x",SMALL_FILE_SIZE,conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
    // checkAccess=false must NOT create the parent dir
    Path p2=dirAllocator.getLocalPathForWrite("p2/x",SMALL_FILE_SIZE,conf,false);
    try {
      localFs.getFileStatus(p2.getParent());
      // Original test silently passed when the directory WAS created;
      // the lookup succeeding means the contract was violated.
      fail("p2's parent directory should not exist, expected FileNotFoundException");
    }
    catch ( Exception e) {
      // JUnit argument order: expected class first, actual second
      // (the original call had them swapped).
      assertEquals(FileNotFoundException.class,e.getClass());
    }
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 *
 * First generate a file with some content through the FileSystem API, then
 * try to open and read the file through the URL stream API.
 * @throws IOException
 */
@Test public void testDfsUrls() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
FsUrlStreamHandlerFactory factory=new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
// NOTE(review): java.net.URL allows setting the stream handler factory
// at most once per JVM; this test will fail if another test set it first.
java.net.URL.setURLStreamHandlerFactory(factory);
Path filePath=new Path("/thefile");
try {
// write 1024 bytes of a known pattern through the FileSystem API
byte[] fileContent=new byte[1024];
for (int i=0; i < fileContent.length; ++i) fileContent[i]=(byte)i;
OutputStream os=fs.create(filePath);
os.write(fileContent);
os.close();
// read the same file back through a hdfs:// URL
URI uri=fs.getUri();
URL fileURL=new URL(uri.getScheme(),uri.getHost(),uri.getPort(),filePath.toString());
InputStream is=fileURL.openStream();
assertNotNull(is);
byte[] bytes=new byte[4096];
assertEquals(1024,is.read(bytes));
is.close();
// content must round-trip byte-for-byte
for (int i=0; i < fileContent.length; ++i) assertEquals(fileContent[i],bytes[i]);
fs.delete(filePath,false);
}
finally {
fs.close();
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test opening and reading from an InputStream through a file:// URL.
 * @throws IOException
 * @throws URISyntaxException
 */
@Test public void testFileUrls() throws IOException, URISyntaxException {
Configuration conf=new HdfsConfiguration();
if (!TEST_ROOT_DIR.exists()) {
if (!TEST_ROOT_DIR.mkdirs()) throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
}
File tmpFile=new File(TEST_ROOT_DIR,"thefile");
URI uri=tmpFile.toURI();
FileSystem fs=FileSystem.get(uri,conf);
try {
// write 1024 bytes of a known pattern through the FileSystem API
byte[] fileContent=new byte[1024];
for (int i=0; i < fileContent.length; ++i) fileContent[i]=(byte)i;
OutputStream os=fs.create(new Path(uri.getPath()));
os.write(fileContent);
os.close();
// read the same file back through its file:// URL
URL fileURL=uri.toURL();
InputStream is=fileURL.openStream();
assertNotNull(is);
byte[] bytes=new byte[4096];
assertEquals(1024,is.read(bytes));
is.close();
// content must round-trip byte-for-byte
for (int i=0; i < fileContent.length; ++i) assertEquals(fileContent[i],bytes[i]);
fs.delete(new Path(uri.getPath()),false);
}
finally {
fs.close();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata: reads leave the old version key alone, while the
 * first write replaces it with the current version key.
 */
@Test public void testFirstContainerVersionMetadata() throws Exception {
  // raw HashMap lost its type parameters in the original; container
  // metadata is string-keyed/string-valued
  HashMap<String, String> containerMetadata=new HashMap<String, String>();
  containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,AzureNativeFileSystemStore.FIRST_WASB_VERSION);
  FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create(containerMetadata);
  // read-only operations must not touch the version metadata
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  // the first write operation migrates the version stamp to the new key
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  fsWithContainer.close();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test public void testPreExistingContainerVersionMetadata() throws Exception {
FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create();
// read-only operations must not stamp any metadata
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
assertNull(fsWithContainer.getContainerMetadata());
// the first write stamps the current WASB version
fsWithContainer.getFs().mkdirs(new Path("/dir"));
assertNotNull(fsWithContainer.getContainerMetadata());
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
fsWithContainer.close();
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata after a
 * write operation.
 */
@Test public void testContainerVersionMetadata() throws Exception {
  fs.createNewFile(new Path("/foo"));
  // raw HashMap lost its type parameters in the original; container
  // metadata is string-keyed/string-valued
  HashMap<String, String> containerMetadata=backingStore.getContainerMetadata();
  assertNotNull(containerMetadata);
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,containerMetadata.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
}
InternalCallVerifierEqualityVerifier
/**
 * Test to make sure that we don't expose the temporary upload folder when
 * listing at the root.
 */
@Test public void testNoTempBlobsVisible() throws Exception {
Path filePath=new Path("/inProgress");
// deliberately keep the stream open: the upload is still in progress
FSDataOutputStream outputStream=fs.create(filePath);
FileStatus[] listOfRoot=fs.listStatus(new Path("/"));
// only the real file should be listed, not any temporary upload blob
assertEquals("Expected one file listed, instead got: " + toString(listOfRoot),1,listOfRoot.length);
assertEquals(fs.makeQualified(filePath),listOfRoot[0].getPath());
outputStream.close();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Renaming a file must bump the web-response counters within a sane
 * range and increment the files-created metric exactly once (for the
 * create, not the rename).
 */
@Test public void testMetricsOnFileRename() throws Exception {
long base=getBaseWebResponses();
Path originalPath=new Path("/metricsTest_RenameStart");
Path destinationPath=new Path("/metricsTest_RenameFinal");
assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_CREATED));
assertTrue(fs.createNewFile(originalPath));
logOpResponseCount("Creating an empty file",base);
// creating a file costs a handful of web round-trips
base=assertWebResponsesInRange(base,2,20);
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_CREATED));
assertTrue(fs.rename(originalPath,destinationPath));
logOpResponseCount("Renaming a file",base);
base=assertWebResponsesInRange(base,2,15);
assertNoErrors();
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * End-to-end metrics check around a 1 KB create-then-read cycle:
 * byte counters, upload/download rates and latencies must all land in
 * ranges derived from the observed wall-clock timings. Rate/latency
 * bounds are intentionally loose since the test measures end-to-end time
 * rather than just block transfer time.
 */
@Test public void testMetricsOnFileCreateRead() throws Exception {
long base=getBaseWebResponses();
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath=new Path("/metricsTest_webResponses");
final int FILE_SIZE=1000;
// suppress background flushes so the gauges only move when triggered
getBandwidthGaugeUpdater().suppressAutoUpdate();
Date start=new Date();
OutputStream outputStream=fs.create(filePath);
outputStream.write(nonZeroByteArray(FILE_SIZE));
outputStream.close();
long uploadDurationMs=new Date().getTime() - start.getTime();
logOpResponseCount("Creating a 1K file",base);
base=assertWebResponsesInRange(base,2,15);
getBandwidthGaugeUpdater().triggerUpdate(true);
long bytesWritten=AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation());
// allow up to 2x overhead/undershoot around the nominal file size
assertTrue("The bytes written in the last second " + bytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2));
long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
// the end-to-end measurement underestimates the true block-upload rate
long expectedRate=(FILE_SIZE * 1000L) / uploadDurationMs;
assertTrue("The upload rate " + uploadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block upload time.",uploadRate >= expectedRate);
long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
// end-to-end time is an upper bound for the block-upload latency
long expectedLatency=uploadDurationMs;
assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
assertTrue("The upload latency " + uploadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block upload time.",uploadLatency <= expectedLatency);
// now read the file back and apply the same checks to download metrics
start=new Date();
InputStream inputStream=fs.open(filePath);
int count=0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
long downloadDurationMs=new Date().getTime() - start.getTime();
assertEquals(FILE_SIZE,count);
logOpResponseCount("Reading a 1K file",base);
base=assertWebResponsesInRange(base,1,10);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE,totalBytesRead);
long bytesRead=AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation());
assertTrue("The bytes read in the last second " + bytesRead + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2));
long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
expectedRate=(FILE_SIZE * 1000L) / downloadDurationMs;
assertTrue("The download rate " + downloadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block download time.",downloadRate >= expectedRate);
long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
expectedLatency=downloadDurationMs;
assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
assertTrue("The download latency " + downloadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block download time.",downloadLatency <= expectedLatency);
assertNoErrors();
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Same create-then-read metrics cycle as the 1 KB test but with a
 * 100 MB file, checking total byte counters and that the rate/latency
 * gauges are populated (exact ranges are too timing-dependent to pin
 * at this size).
 */
@Test public void testMetricsOnBigFileCreateRead() throws Exception {
long base=getBaseWebResponses();
assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()));
Path filePath=new Path("/metricsTest_webResponses");
final int FILE_SIZE=100 * 1024 * 1024;
// suppress background flushes so the gauges only move when triggered
getBandwidthGaugeUpdater().suppressAutoUpdate();
OutputStream outputStream=fs.create(filePath);
outputStream.write(new byte[FILE_SIZE]);
outputStream.close();
logOpResponseCount("Creating a 100 MB file",base);
// a 100 MB upload takes many block PUTs
base=assertWebResponsesInRange(base,20,50);
getBandwidthGaugeUpdater().triggerUpdate(true);
long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation());
assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2));
long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE);
System.out.println("Upload rate: " + uploadRate + " bytes/second.");
long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY);
System.out.println("Upload latency: " + uploadLatency);
assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0);
// read the whole file back and verify the download-side metrics
InputStream inputStream=fs.open(filePath);
int count=0;
while (inputStream.read() >= 0) {
count++;
}
inputStream.close();
assertEquals(FILE_SIZE,count);
logOpResponseCount("Reading a 100 MB file",base);
base=assertWebResponsesInRange(base,20,40);
getBandwidthGaugeUpdater().triggerUpdate(false);
long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation());
assertEquals(FILE_SIZE,totalBytesRead);
long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE);
System.out.println("Download rate: " + downloadRate + " bytes/second.");
long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY);
System.out.println("Download latency: " + downloadLatency);
assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testMetricsOnFileExistsDelete() throws Exception {
// Web-response counter baseline; subsequent range checks are deltas.
long base=getBaseWebResponses();
Path filePath=new Path("/metricsTest_delete");
assertFalse(fs.exists(filePath));
logOpResponseCount("Checking file existence for non-existent file",base);
// An existence probe on a missing file should need only 1-3 requests.
base=assertWebResponsesInRange(base,1,3);
assertTrue(fs.createNewFile(filePath));
base=getCurrentWebResponses();
assertTrue(fs.exists(filePath));
logOpResponseCount("Checking file existence for existent file",base);
base=assertWebResponsesInRange(base,1,2);
// No deletes so far: the deletion counter must still read zero.
assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED));
assertTrue(fs.delete(filePath,false));
logOpResponseCount("Deleting a file",base);
base=assertWebResponsesInRange(base,1,4);
// Exactly one file deletion must now be recorded.
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED));
assertNoErrors();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Seek around a file larger than the IO buffers and confirm each byte
 * read after a seek matches the generated dataset.
 * @throws Throwable on any failure
 */
@Test public void testSeekBigFile() throws Throwable {
  describe("Seek round a large file and verify the bytes are what is expected");
  Path bigFile = path("bigseekfile.txt");
  byte[] expected = dataset(65536, 0, 255);
  createFile(getFileSystem(), bigFile, false, expected);
  instream = getFileSystem().open(bigFile);
  assertEquals(0, instream.getPos());
  // Re-seek to the start and check the first three sequential bytes.
  instream.seek(0);
  assertEquals(0, instream.read());
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());
  // Jump forwards, further forwards, backwards, then back to the start.
  instream.seek(32768);
  assertEquals("@32768", expected[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", expected[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", expected[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/** Reading past the last byte of the file must return -1, not fail. */
@Test public void testSeekAndReadPastEndOfFile() throws Throwable {
  describe("verify that reading past the last bytes in the file returns -1");
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());
  // Two bytes remain after this seek: both reads succeed, the next is EOF.
  instream.seek(TEST_FILE_LEN - 2);
  for (int i = 0; i < 2; i++) {
    assertTrue("Premature EOF", instream.read() != -1);
  }
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe("verify that a positioned read does not change the getPos() value");
Path testSeekFile=path("bigseekfile.txt");
byte[] block=dataset(65536,0,255);
createFile(getFileSystem(),testSeekFile,false,block);
instream=getFileSystem().open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
// The sequential read above advanced the stream position to 40000.
assertEquals(40000,instream.getPos());
byte[] readBuffer=new byte[256];
// Positioned read at offset 128: must not move the stream position.
// NOTE(review): the return value (bytes actually read) is discarded; the
// verification loop below implicitly assumes a full 256-byte read — confirm
// the positioned-read contract guarantees this for this filesystem.
instream.read(128,readBuffer,0,readBuffer.length);
assertEquals(40000,instream.getPos());
assertEquals("@40000",block[40000],(byte)instream.read());
// The buffer should hold bytes [128, 384) of the dataset.
for (int i=0; i < 256; i++) {
assertEquals("@" + i,block[i + 128],readBuffer[i]);
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/** A seek to a negative offset must raise an (ideally EOF) exception. */
@Test public void testNegativeSeek() throws Throwable {
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());
  try {
    instream.seek(-1);
    // No exception was raised: collect diagnostics before failing.
    long pos = instream.getPos();
    LOG.warn("Seek to -1 returned a position of " + pos);
    int b = instream.read();
    fail("expected an exception, got data " + b + " at a position of "+ pos);
  }
  catch (EOFException e) {
    // The preferred outcome.
    handleExpectedException(e);
  }
  catch (IOException e) {
    // Tolerated for filesystems with a looser contract.
    handleRelaxedException("a negative seek", "EOFException", e);
  }
  // The failed seek must not have moved the stream position.
  assertEquals(0, instream.getPos());
}
APIUtilityVerifierEqualityVerifier
/** Every form of read on an empty file must signal EOF (-1). */
@Test public void testSeekZeroByteFile() throws Throwable {
  describe("seek and read a 0 byte file");
  instream = getFileSystem().open(zeroByteFile);
  assertEquals(0, instream.getPos());
  assertMinusOne("initial byte read", instream.read());
  // Seeking back to 0 must not change the EOF outcome.
  instream.seek(0);
  assertMinusOne("post-seek byte read", instream.read());
  byte[] buf = new byte[1];
  assertMinusOne("post-seek buffer read", instream.read(buf, 0, 1));
}
APIUtilityVerifierEqualityVerifier
/** A buffered (block) read on an empty file must immediately report EOF. */
@Test public void testBlockReadZeroByteFile() throws Throwable {
  describe("do a block read on a 0 byte file");
  instream = getFileSystem().open(zeroByteFile);
  assertEquals(0, instream.getPos());
  byte[] buf = new byte[1];
  assertMinusOne("block read zero byte file", instream.read(buf, 0, 1));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test ACL operations on a directory, including default ACLs.
* General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
*
*/
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Validate the various ACL set/modify/remove calls. General strategy is
* to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
* and GETACLSTATUS:
*
*/
InternalCallVerifierEqualityVerifier
/** ls must not fail when the getAclStatus RPC is unavailable server-side. */
@Test public void testLsNoRpcForGetAclStatus() throws Exception {
  // Configure a stub filesystem whose getAclStatus has no RPC backing.
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
  int exit = ToolRunner.run(conf, new FsShell(), new String[]{"-ls", "/"});
  assertEquals("ls must succeed even if getAclStatus RPC does not exist.", 0, exit);
}
EqualityVerifier
/**
 * Parse an ACL spec mixing basic, named-user, named-group and
 * default-scoped entries, and compare against hand-built equivalents.
 * (Now typed with generics; the original used raw List/ArrayList.)
 */
@Test public void testMultipleAclSpecParsing() throws Exception {
  List<AclEntry> parsedList = AclEntry.parseAclSpec("group::rwx,user:user1:rwx,user:user2:rw-," + "group:group1:rw-,default:group:group1:rw-", true);
  // Unnamed group entry: the file's owning group.
  AclEntry basicAcl = new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.ALL).build();
  AclEntry user1Acl = new AclEntry.Builder().setType(AclEntryType.USER).setPermission(FsAction.ALL).setName("user1").build();
  AclEntry user2Acl = new AclEntry.Builder().setType(AclEntryType.USER).setPermission(FsAction.READ_WRITE).setName("user2").build();
  AclEntry group1Acl = new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.READ_WRITE).setName("group1").build();
  // Same group entry but in the DEFAULT scope.
  AclEntry defaultAcl = new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.READ_WRITE).setName("group1").setScope(AclEntryScope.DEFAULT).build();
  List<AclEntry> expectedList = new ArrayList<AclEntry>();
  expectedList.add(basicAcl);
  expectedList.add(user1Acl);
  expectedList.add(user2Acl);
  expectedList.add(group1Acl);
  expectedList.add(defaultAcl);
  assertEquals("Parsed Acl not correct", expectedList, parsedList);
}
InternalCallVerifierEqualityVerifier
/** ls must not fail when the target FileSystem implements no ACL support. */
@Test public void testLsAclsUnsupported() throws Exception {
  // Point the default FS at a stub with no ACL implementation.
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  int exit = ToolRunner.run(conf, new FsShell(), new String[]{"-ls", "/"});
  assertEquals("ls must succeed even if FileSystem does not implement ACLs.", 0, exit);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Seek past the buffer then read.
 * @throws Throwable problems
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream = fs.open(readFile);
  assertEquals(0, instream.getPos());
  // Two bytes remain after this seek: both reads succeed, the next is EOF.
  instream.seek(SEEK_FILE_LEN - 2);
  for (int i = 0; i < 2; i++) {
    assertTrue("Premature EOF", instream.read() != -1);
  }
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Seek past the buffer and attempt a read(buffer).
 * @throws Throwable failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBulkReadPastEndOfFile() throws Throwable {
  instream = fs.open(readFile);
  assertEquals(0, instream.getPos());
  instream.seek(SEEK_FILE_LEN - 1);
  byte[] buf = new byte[1];
  // First read consumes the final byte; the next two must report EOF.
  int r = instream.read(buf, 0, 1);
  r = instream.read(buf, 0, 1);
  assertMinusOne("read past end of file", r);
  r = instream.read(buf, 0, 1);
  assertMinusOne("read past end of file", r);
  // A zero-length read is range-checked before the EOF test, so it returns 0.
  r = instream.read(buf, 0, 0);
  assertEquals("EOF checks coming before read range check", 0, r);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/** Reading past the end of the small seek file must return -1. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
  instream = fs.open(smallSeekFile);
  assertEquals(0, instream.getPos());
  instream.seek(SMALL_SEEK_FILE_LEN - 2);
  // The final two bytes read normally; the third read reports EOF.
  for (int i = 0; i < 2; i++) {
    assertTrue("Premature EOF", instream.read() != -1);
  }
  assertMinusOne("read past end of file", instream.read());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNegativeSeek() throws Throwable {
instream=fs.open(smallSeekFile);
assertEquals(0,instream.getPos());
try {
instream.seek(-1);
// If seek(-1) did not throw, log where it left the stream and what a
// read returns, then fail the test.
long p=instream.getPos();
LOG.warn("Seek to -1 returned a position of " + p);
int result=instream.read();
fail("expected an exception, got data " + result + " at a position of "+ p);
}
catch ( IOException e) {
// expected: a negative seek must be rejected with an IOException
}
// The rejected seek must not have moved the stream position.
assertEquals(0,instream.getPos());
}
APIUtilityVerifierEqualityVerifier
/** Overwriting a file must replace both its length and its contents. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testOverwrite() throws Throwable {
  Path target = new Path("/test/Overwrite");
  try {
    // Initial write.
    String first = "Testing a put to a file " + System.currentTimeMillis();
    writeTextFile(fs, target, first, false);
    assertFileHasLength(fs, target, first.length());
    // Overwrite with different content; length and bytes must both update.
    String second = "Overwriting a file " + System.currentTimeMillis();
    writeTextFile(fs, target, second, true);
    assertFileHasLength(fs, target, second.length());
    assertEquals(second, readBytesToString(fs, target, second.length()));
  }
  finally {
    delete(fs, target);
  }
}
APIUtilityVerifierEqualityVerifier
/** Round-trip a timestamped string through the filesystem. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPutGetFile() throws Throwable {
  Path target = new Path("/test/PutGetFile");
  try {
    String text = "Testing a put and get to a file " + System.currentTimeMillis();
    writeTextFile(fs, target, text, false);
    assertEquals(text, readBytesToString(fs, target, text.length()));
  }
  finally {
    delete(fs, target);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * test that a dir off root has a listStatus() call that
 * works as expected. and that when a child is added. it changes
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
Path test=path("/test");
// Start clean, then create the directory.
fs.delete(test,true);
mkdirs(test);
assertExists("created test directory",test);
// A freshly created directory must list as empty.
FileStatus[] statuses=fs.listStatus(test);
String statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,0,statuses.length);
Path src=path("/test/file");
// Add a zero-byte child; the listing must now contain one entry.
SwiftTestUtils.touch(fs,src);
statuses=fs.listStatus(test);
statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,1,statuses.length);
SwiftFileStatus stat=(SwiftFileStatus)statuses[0];
// NOTE(review): the zero-byte object is asserted to be a directory here —
// presumably Swift represents empty objects as directories; confirm in
// SwiftFileStatus.isDir() before relying on this.
assertTrue("isDir(): Not a directory: " + stat,stat.isDir());
extraStatusAssertions(stat);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * test that a dir two levels down has a listStatus() call that
 * works as expected.
 * @throws Exception on failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesLowerDownHaveMatchingFileStatus() throws Exception {
  Path dir = path("/test/testDirectoriesLowerDownHaveMatchingFileStatus");
  // Start from a clean slate, then create the nested directory.
  fs.delete(dir, true);
  mkdirs(dir);
  assertExists("created test sub directory", dir);
  // A newly created directory must list as empty.
  FileStatus[] statuses = fs.listStatus(dir);
  String listing = statusToString(dir.toString(), statuses);
  assertEquals("Wrong number of elements in file status " + listing, 0, statuses.length);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Assert that a filesystem is case sensitive.
 * This is done by creating a mixed-case filename and asserting that
 * its lower case version is not there.
 * @throws Exception failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testFilesystemIsCaseSensitive() throws Exception {
  String mixedCaseFilename = "/test/UPPER.TXT";
  Path upper = path(mixedCaseFilename);
  Path lower = path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
  // Neither variant exists before the test starts.
  assertFalse("File exists" + upper, fs.exists(upper));
  assertFalse("File exists" + lower, fs.exists(lower));
  // Create only the upper-case file.
  FSDataOutputStream out = fs.create(upper);
  out.writeUTF("UPPER");
  out.close();
  FileStatus upperStatus = fs.getFileStatus(upper);
  assertExists("Original upper case file" + upper, upper);
  // Case sensitivity: the lower-case twin must not appear.
  assertPathDoesNotExist("lower case file", lower);
  // Creating the lower-case file must not disturb the upper-case one.
  out = fs.create(lower);
  out.writeUTF("l");
  out.close();
  assertExists("lower case file", lower);
  assertExists("Original upper case file " + upper, upper);
  FileStatus refreshedStatus = fs.getFileStatus(upper);
  assertEquals("Expected status:" + upperStatus + " actual status " + refreshedStatus, upperStatus.getLen(), refreshedStatus.getLen());
}
EqualityVerifier
/** After cleaning the known test dirs, listing / must return nothing. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListEmptyRoot() throws Throwable {
  describe("Empty the root dir and verify that an LS / returns {}");
  // Remove the directories earlier suites may have left behind.
  cleanup("testListEmptyRoot", fs, "/test");
  cleanup("testListEmptyRoot", fs, "/user");
  FileStatus[] rootListing = fs.listStatus(path("/"));
  assertEquals("Non-empty root" + dumpStats("/", rootListing), 0, rootListing.length);
}
APIUtilityVerifierEqualityVerifier
/** listStatus() on a file returns exactly one entry: the file itself. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListStatusFile() throws Exception {
  describe("Create a single file under /test;" + " assert that listStatus(/test) finds it");
  Path file = path("/test/filename");
  createFile(file);
  FileStatus[] pathStats = fs.listStatus(file);
  assertEquals(dumpStats("/test/", pathStats), 1, pathStats.length);
  // The single entry must report the length of the written dataset.
  FileStatus lsStat = pathStats[0];
  assertEquals("Wrong file len in listing of " + lsStat, data.length, lsStat.getLen());
}
EqualityVerifier
/** The 1 KB partition size configured by the suite must reach the conf. */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testPartitionPropertyPropagatesToConf() throws Throwable {
  int partitionSize = getConf().getInt(SwiftProtocolConstants.SWIFT_PARTITION_SIZE, 0);
  assertEquals(1, partitionSize);
}
APIUtilityVerifierEqualityVerifier
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testOverwritePartitionedFile() throws Throwable {
final Path path=new Path("/test/testOverwritePartitionedFile");
// First upload: 8 KB, large enough to be split into multiple partitions.
final int len1=8192;
final byte[] src1=SwiftTestUtils.dataset(len1,'A','Z');
FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,1024);
out.write(src1,0,len1);
out.close();
long expected=getExpectedPartitionsWritten(len1,PART_SIZE_BYTES,false);
assertPartitionsWritten("initial upload",out,expected);
assertExists("Exists",path);
FileStatus status=fs.getFileStatus(path);
assertEquals("Length",len1,status.getLen());
// Overwrite with a smaller (4095 byte) payload.
final int len2=4095;
final byte[] src2=SwiftTestUtils.dataset(len2,'a','z');
out=fs.create(path,true,getBufferSize(),(short)1,1024);
out.write(src2,0,len2);
out.close();
// The file must now report the new, smaller length...
status=fs.getFileStatus(path);
assertEquals("Length",len2,status.getLen());
// ...and read back exactly the new contents, with no stale partition data.
byte[] dest=readDataset(fs,path,len2);
SwiftTestUtils.compareByteArrays(src2,dest,len2);
}
EqualityVerifier
/**
 * The store must report the same 1 KB partition size as the conf.
 * (Method-name typo "Partion" retained: renaming a test changes its ID.)
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testPartionPropertyPropagatesToStore() throws Throwable {
  long partSizeKB = fs.getStore().getPartsizeKB();
  assertEquals(1, partSizeKB);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testManyPartitionedFile() throws Throwable {
final Path path=new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data.
int len=PART_SIZE_BYTES * 15;
final byte[] src=SwiftTestUtils.dataset(len,32,144);
FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
out.write(src,0,src.length);
int expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
out.close();
assertPartitionsWritten("write completed",out,expected);
// Both the local write counter and the uploaded-byte counter must match.
assertEquals("too few bytes written",len,SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded",len,SwiftNativeFileSystem.getBytesUploaded(out));
// Round-trip: read back and compare byte-for-byte.
byte[] dest=readDataset(fs,path,len);
SwiftTestUtils.compareByteArrays(src,dest,len);
// Listing the file path exposes one entry per partition.
FileStatus[] stats=fs.listStatus(path);
assertEquals("wrong entry count in " + SwiftTestUtils.dumpStats(path.toString(),stats),expected,stats.length);
}
APIUtilityVerifierEqualityVerifier
/**
 * Rename a partitioned file and verify the destination has the full
 * contents while the source file and its partitions are gone.
 * (Removed the unused locals destPart — which was misleadingly built from
 * src rather than dest — and destLs.)
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testRenamePartitionedFile() throws Throwable {
  Path src = new Path("/test/testRenamePartitionedFileSrc");
  int len = data.length;
  SwiftTestUtils.writeDataset(fs, src, data, len, 1024, false);
  assertExists("Exists", src);
  // The upload is partitioned: partition #1 must exist beside the file.
  String partOneName = SwiftUtils.partitionFilenameFromNumber(1);
  Path srcPart = new Path(src, partOneName);
  Path dest = new Path("/test/testRenamePartitionedFileDest");
  assertExists("Partition Exists", srcPart);
  fs.rename(src, dest);
  assertPathExists(fs, "dest file missing", dest);
  // The destination must carry the full length and identical bytes.
  FileStatus status = fs.getFileStatus(dest);
  assertEquals("Length of renamed file is wrong", len, status.getLen());
  byte[] destData = readDataset(fs, dest, len);
  SwiftTestUtils.compareByteArrays(data, destData, len);
  // Source file and its partition must both be gone.
  String srcLs = SwiftTestUtils.ls(fs, src);
  assertPathDoesNotExist("deleted file still found in " + srcLs, src);
  assertPathDoesNotExist("partition file still found in " + srcLs, srcPart);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Rename a file and verify the destination holds the original bytes.
 * (Charset is now explicit UTF-8; the original getBytes()/new String()
 * calls relied on the platform default charset.)
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
  assumeRenameSupported();
  final Path old = new Path("/test/alice/file");
  final Path newPath = new Path("/test/bob/file");
  fs.mkdirs(newPath.getParent());
  // Write a small payload to the source file.
  final FSDataOutputStream fsDataOutputStream = fs.create(old);
  final byte[] message = "Some data".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  fsDataOutputStream.write(message);
  fsDataOutputStream.close();
  assertTrue(fs.exists(old));
  // Rename and require: success, source gone, destination present.
  rename(old, newPath, true, false, true);
  // Read the renamed file back and compare contents.
  final FSDataInputStream bobStream = fs.open(newPath);
  final byte[] bytes = new byte[512];
  final int read = bobStream.read(bytes);
  bobStream.close();
  final byte[] buffer = new byte[read];
  System.arraycopy(bytes, 0, buffer, 0, read);
  assertEquals(new String(message, java.nio.charset.StandardCharsets.UTF_8), new String(buffer, java.nio.charset.StandardCharsets.UTF_8));
}
APIUtilityVerifierEqualityVerifier
/** A fully-qualified swift URL must parse to container + object key. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testParseUrlPath() throws Exception {
  final String pathString = "swift://container.service1/home/user/files/file1";
  final URI uri = new URI(pathString);
  final SwiftObjectPath expected = SwiftObjectPath.fromPath(uri, new Path(pathString));
  final String container = RestClientBindings.extractContainerName(uri);
  final SwiftObjectPath actual = new SwiftObjectPath(container, "/home/user/files/file1");
  assertEquals(expected, actual);
}
APIUtilityVerifierEqualityVerifier
/** A bare filesystem path pairs with the container from the URI host. */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testParsePath() throws Exception {
  final String pathString = "/home/user/files/file1";
  final URI uri = new URI("http://container.localhost");
  final SwiftObjectPath expected = SwiftObjectPath.fromPath(uri, new Path(pathString));
  final String container = RestClientBindings.extractContainerName(uri);
  final SwiftObjectPath actual = new SwiftObjectPath(container, pathString);
  assertEquals(expected, actual);
}
EqualityVerifier
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testHandleUrlAsPath() throws Exception {
final String hostPart="swift://container.service1";
final String pathPart="/home/user/files/file1";
final String uriString=hostPart + pathPart;
// The object path built from the full URI must equal the one built from
// just the path part — presumably SwiftObjectPath strips the swift://host
// prefix when normalizing the object name; confirm in SwiftObjectPath.
final SwiftObjectPath expected=new SwiftObjectPath(uriString,pathPart);
final SwiftObjectPath actual=new SwiftObjectPath(uriString,uriString);
assertEquals(expected,actual);
}
APIUtilityVerifierEqualityVerifier
/**
 * URLs carrying a keystone auth segment (/v2/AUTH_xxx) must still map
 * to the plain object path.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testParseAuthenticatedUrl() throws Exception {
  final String pathString = "swift://container.service1/v2/AUTH_00345h34l93459y4/home/tom/documents/finance.docx";
  final URI uri = new URI(pathString);
  final SwiftObjectPath expected = SwiftObjectPath.fromPath(uri, new Path(pathString));
  final String container = RestClientBindings.extractContainerName(uri);
  final SwiftObjectPath actual = new SwiftObjectPath(container, "/home/tom/documents/finance.docx");
  assertEquals(expected, actual);
}
InternalCallVerifierEqualityVerifier
/**
 * Test that getContentSummary can be retrieved on the client side.
 */
@Test public void testGetContentSummary() throws IOException {
  // Set quotas directly on HDFS, then read them back through viewfs.
  FileSystem hFs = cluster.getFileSystem(0);
  final DistributedFileSystem dfs = (DistributedFileSystem) hFs;
  dfs.setQuota(testFileDirPath, 100, 500);
  ContentSummary summary = vfs.getContentSummary(testFileDirPath);
  assertEquals(100, summary.getQuota());
  assertEquals(500, summary.getSpaceQuota());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that default blocksize values can be retrieved on the client side.
 */
@Test public void testGetDefaultBlockSize() throws IOException, URISyntaxException {
  try {
    // Without a path, viewfs cannot resolve a mount point.
    vfs.getDefaultBlockSize();
    // Fixed message: the original said "getServerDefaults" (copy/paste)
    // and misspelled "exception".
    fail("getDefaultBlockSize on viewFs did not throw exception!");
  }
  catch ( NotInMountpointException e) {
    // With a path inside a mount point the HDFS default is returned.
    // (expected-first argument order)
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, vfs.getDefaultBlockSize(testFilePath));
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that server default values can be retrieved on the client side.
 */
@Test public void testServerDefaults() throws IOException {
  try {
    // Without a path, viewfs cannot pick a mount point to delegate to.
    // (Result intentionally discarded; the original bound it to an unused local.)
    vfs.getServerDefaults();
    // Fixed message: the original misspelled "exception" as "excetion".
    fail("getServerDefaults on viewFs did not throw exception!");
  }
  catch ( NotInMountpointException e) {
    // With a path, the defaults of the backing HDFS are visible.
    FsServerDefaults serverDefaults = vfs.getServerDefaults(testFilePath);
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
    // The cluster is configured with replication = default + 1.
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that default replication values can be retrieved on the client side.
 */
@Test public void testGetDefaultReplication() throws IOException, URISyntaxException {
  try {
    // Without a path there is no mount point to resolve against.
    vfs.getDefaultReplication();
    // Fixed message: the original misspelled "exception" as "excetion".
    fail("getDefaultReplication on viewFs did not throw exception!");
  }
  catch ( NotInMountpointException e) {
    // Expected-first argument order (was reversed in the original).
    assertEquals(DFS_REPLICATION_DEFAULT + 1, vfs.getDefaultReplication(testFilePath));
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/** A viewfs path must report the same checksum as its backing HDFS file. */
@Test public void testGetFileChecksum() throws IOException, URISyntaxException {
  // Create the target file plus a second, different file for contrast.
  fileSystemTestHelper.createFile(fHdfs, someFile);
  fileSystemTestHelper.createFile(fHdfs, fileSystemTestHelper.getTestRootPath(fHdfs, someFile + "other"), 1, 512);
  FileChecksum viewFsChecksum = vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
  FileChecksum hdfsChecksum = fHdfs.getFileChecksum(new Path(someFile));
  FileChecksum otherChecksum = fHdfs.getFileChecksum(new Path(someFile + "other"));
  // The viewfs path resolves to the same HDFS file, so checksums agree...
  assertEquals("HDFS and ViewFS checksums were not the same", viewFsChecksum, hdfsChecksum);
  // ...while a different file must produce a different checksum.
  assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!", viewFsChecksum.equals(otherChecksum));
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
  FileStatus[] dirPaths = fsView.listStatus(new Path("/"));
  FileStatus fs;
  verifyRootChildren(dirPaths);
  // Listing an internal (non-mount) directory of the mount table.
  dirPaths = fsView.listStatus(new Path("/internalDir"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/internalDir2", dirPaths);
  Assert.assertNotNull(fs);
  // Fixed message: this entry is an internal dir (the assertion checks
  // isDirectory()); the original message wrongly said "symlink".
  Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
  fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/linkToDir2", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}
InternalCallVerifierEqualityVerifier
/**
* This default implementation is when viewfs has mount points
* into file systems, such as LocalFs that do no have delegation tokens.
* It should be overridden for when mount points into hdfs.
*/
@Test public void testGetDelegationTokens() throws IOException {
Token>[] delTokens=fsView.addDelegationTokens("sanjay",new Credentials());
Assert.assertEquals(getExpectedDelegationTokenCount(),delTokens.length);
}
Class: org.apache.hadoop.fs.viewfs.ViewFsBaseTest
InternalCallVerifierEqualityVerifier
/**
 * This default implementation is when viewfs has mount points
 * into file systems, such as LocalFs that do no have delegation tokens.
 * It should be overridden for when mount points into hdfs.
 */
@Test public void testGetDelegationTokens() throws IOException {
  // Fixed garbled generics: getDelegationTokens returns List<Token<?>>.
  List<Token<?>> delTokens = fcView.getDelegationTokens(new Path("/"), "sanjay");
  Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * The ACL of an internal mount-table dir reflects the current user and a
 * minimal r-xr-xr-x ACL with no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
  // Expected-first argument order (the original passed actual first).
  assertEquals(currentUser.getUserName(), aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0], aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555), aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
// Root of the mount table: 7 entries are expected; five are spot-checked
// as mount symlinks and /internalDir as a real directory.
FileStatus[] dirPaths=fcView.util().listStatus(new Path("/"));
FileStatus fs;
Assert.assertEquals(7,dirPaths.length);
fs=fileContextTestHelper.containsPath(fcView,"/user",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
fs=fileContextTestHelper.containsPath(fcView,"/data",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
fs=fileContextTestHelper.containsPath(fcView,"/internalDir",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
fs=fileContextTestHelper.containsPath(fcView,"/danglingLink",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
fs=fileContextTestHelper.containsPath(fcView,"/linkToAFile",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
// One level down: the internal dir holds one internal dir and one mount.
dirPaths=fcView.util().listStatus(new Path("/internalDir"));
Assert.assertEquals(2,dirPaths.length);
fs=fileContextTestHelper.containsPath(fcView,"/internalDir/internalDir2",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
fs=fileContextTestHelper.containsPath(fcView,"/internalDir/linkToDir2",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * verify that receiveActiveData gives data when active exists, tells that
 * active does not exist and reports error in getting active information
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 * @throws ActiveNotFoundException
 */
@Test public void testGetActiveData() throws ActiveNotFoundException, KeeperException, InterruptedException, IOException {
// Case 1: lock znode exists -> its data is returned as-is.
byte[] data=new byte[8];
Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenReturn(data);
Assert.assertEquals(data,elector.getActiveData());
Mockito.verify(mockZK,Mockito.times(1)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject());
// Case 2: znode missing -> NoNodeException surfaces as ActiveNotFoundException.
Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenThrow(new KeeperException.NoNodeException());
try {
elector.getActiveData();
Assert.fail("ActiveNotFoundException expected");
}
catch ( ActiveNotFoundException e) {
Mockito.verify(mockZK,Mockito.times(2)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject());
}
// Case 3: any other KeeperException propagates unchanged.
try {
Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenThrow(new KeeperException.AuthFailedException());
elector.getActiveData();
Assert.fail("KeeperException.AuthFailedException expected");
}
catch ( KeeperException.AuthFailedException ke) {
Mockito.verify(mockZK,Mockito.times(3)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject());
}
}
EqualityVerifier
/**
 * verify quit election terminates connection and there are no new watches.
 * next call to joinElection creates new connection and performs election
 */
@Test public void testQuitElection() throws Exception {
  elector.joinElection(data);
  Mockito.verify(mockZK, Mockito.times(0)).close();
  // Quitting must close the ZK connection and leave no exists() watches.
  elector.quitElection(true);
  Mockito.verify(mockZK, Mockito.times(1)).close();
  verifyExistCall(0);
  // Re-joining creates a fresh connection and re-runs the election.
  // (Local renamed from "data": the original shadowed the instance field
  // used by the first joinElection call above.)
  byte[] rejoinData = new byte[8];
  elector.joinElection(rejoinData);
  Assert.assertEquals(2, count);
  // NODEEXISTS on the create -> the elector should go standby and watch.
  elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK, ZK_LOCK_NAME);
  Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
  verifyExistCall(1);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that, when the callback fails to enter active state,
 * the elector rejoins the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
Assert.assertEquals(0,elector.sleptFor);
// Make the app's becomeActive() callback fail.
Mockito.doThrow(new ServiceFailedException("failed to become active")).when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockApp).becomeActive();
// After the failure the elector re-creates the lock znode (2nd create)...
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// ...after sleeping for a non-zero back-off period.
Assert.assertTrue(elector.sleptFor > 0);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that, when the callback fails to enter active state, after
 * a ZK disconnect (i.e from the StatCallback), that the elector rejoins
 * the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
Assert.assertEquals(0,elector.sleptFor);
// Connection loss during create -> the elector retries (2nd create call).
elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
// NODEEXISTS from the retry -> the elector stats the node to find the owner.
elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
verifyExistCall(1);
// Make the stat report that the node belongs to this very session...
Stat stat=new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
// ...but have the app fail its becomeActive() callback.
Mockito.doThrow(new ServiceFailedException("fail to become active")).when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,stat);
Mockito.verify(mockApp,Mockito.times(1)).becomeActive();
// After the failure the elector re-joins: a third create, after a sleep.
Mockito.verify(mockZK,Mockito.times(3)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
Assert.assertTrue(elector.sleptFor > 0);
}
UtilityVerifierEqualityVerifierHybridVerifier
@Test public void testFailoverFromActiveToActive() throws Exception {
// Both the source and the target of the failover start out ACTIVE.
DummyHAService fromSvc=new DummyHAService(HAServiceState.ACTIVE,svc1Addr);
DummyHAService toSvc=new DummyHAService(HAServiceState.ACTIVE,svc2Addr);
fromSvc.fencer=toSvc.fencer=setupFencer(AlwaysSucceedFencer.class.getName());
boolean threw=false;
try {
doFailover(fromSvc,toSvc,false,false);
}
catch ( FailoverFailedException ffe) {
// expected: failing over onto an already-active service is rejected
threw=true;
}
assertTrue("Can't failover to an already active service",threw);
// Neither side should have changed HA state.
assertEquals(HAServiceState.ACTIVE,fromSvc.state);
assertEquals(HAServiceState.ACTIVE,toSvc.state);
}
Class: org.apache.hadoop.ha.TestHAAdmin
EqualityVerifier
/**
 * Exercise HAAdmin usage/error paths: no arguments, unknown commands, and
 * wrong argument counts must all return -1 with a matching message.
 */
@Test public void testAdminUsage() throws Exception {
// No arguments: usage text is printed.
assertEquals(-1,runTool());
assertOutputContains("Usage:");
assertOutputContains("-transitionToActive");
// Unknown commands, with and without the leading dash.
assertEquals(-1,runTool("badCommand"));
assertOutputContains("Bad command 'badCommand'");
assertEquals(-1,runTool("-badCommand"));
assertOutputContains("badCommand: Unknown");
// transitionToActive requires exactly one argument.
assertEquals(-1,runTool("-transitionToActive"));
assertOutputContains("transitionToActive: incorrect number of arguments");
assertEquals(-1,runTool("-transitionToActive","x","y"));
assertOutputContains("transitionToActive: incorrect number of arguments");
// failover requires both a source and a target service.
assertEquals(-1,runTool("-failover"));
assertOutputContains("failover: incorrect arguments");
assertEquals(-1,runTool("-failover","foo:1234"));
assertOutputContains("failover: incorrect arguments");
}
EqualityVerifier
@Test public void testHelp() throws Exception {
// Bare -help and per-command help must both exit with status 0.
assertEquals(0,runTool("-help"));
assertEquals(0,runTool("-help","transitionToActive"));
// Per-command help prints that command's description.
assertOutputContains("Transitions the service into Active");
}
Class: org.apache.hadoop.ha.TestNodeFencer
BooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
@Test public void testWhitespaceAndCommentsInConfig() throws BadFencingConfigurationException {
// Config string mixes blank lines, leading whitespace, and '#' comments
// around two fencer entries: one that always fails, then one that succeeds.
NodeFencer fencer=setupFencer("\n" + " # the next one will always fail\n" + " " + AlwaysFailFencer.class.getName() + "(foo) # <- fails\n"+ AlwaysSucceedFencer.class.getName()+ "(bar) \n");
// Overall fencing succeeds because the second fencer succeeds.
assertTrue(fencer.fence(MOCK_TARGET));
// Both fencers ran exactly once against the mock target...
assertEquals(1,AlwaysFailFencer.fenceCalled);
assertSame(MOCK_TARGET,AlwaysFailFencer.fencedSvc);
assertEquals(1,AlwaysSucceedFencer.fenceCalled);
assertSame(MOCK_TARGET,AlwaysSucceedFencer.fencedSvc);
// ...and each received its own parenthesized argument, comments stripped.
assertEquals("foo",AlwaysFailFencer.callArgs.get(0));
assertEquals("bar",AlwaysSucceedFencer.callArgs.get(0));
}
InternalCallVerifierEqualityVerifier
/**
 * Test that the various command lines for formatting the ZK directory
 * function correctly.
 */
@Test(timeout=15000) public void testFormatZK() throws Exception {
DummyHAService svc=cluster.getService(1);
// Without a formatted parent znode, the ZKFC refuses to run.
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,runFC(svc));
// Initial format succeeds.
assertEquals(0,runFC(svc,"-formatZK"));
// Re-formatting in non-interactive mode is denied once the znode exists.
assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,runFC(svc,"-formatZK","-nonInteractive"));
// -force overrides the denial.
assertEquals(0,runFC(svc,"-formatZK","-force"));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that automatic failover won't run against a target that hasn't
 * explicitly enabled the feature.
 */
@Test(timeout=10000) public void testWontRunWhenAutoFailoverDisabled() throws Exception {
// Spy on the service so it reports auto-failover as disabled.
DummyHAService svc=Mockito.spy(cluster.getService(1));
Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();
// Both formatting and running the ZKFC must refuse to proceed.
final int denied=ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED;
assertEquals(denied,runFC(svc,"-formatZK"));
assertEquals(denied,runFC(svc));
}
InternalCallVerifierEqualityVerifier
/**
 * Test that the ZKFC won't run if fencing is not configured for the
 * local service.
 */
@Test(timeout=15000) public void testFencingMustBeConfigured() throws Exception {
// Make the fencing-configuration check fail for this service.
DummyHAService svc=Mockito.spy(cluster.getService(0));
Mockito.doThrow(new BadFencingConfigurationException("no fencing")).when(svc).checkFencingConfigured();
// Formatting the ZK directory is still allowed...
assertEquals(0,runFC(svc,"-formatZK"));
// ...but actually running the ZKFC must fail with the no-fencer error code.
assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER,runFC(svc));
}
InternalCallVerifierEqualityVerifier
/**
 * Test that if ZooKeeper is not running, the correct error
 * code is returned.
 */
@Test(timeout=15000) public void testNoZK() throws Exception {
// Take down the ZK server before launching the failover controller.
stopServer();
// The ZKFC must fail fast with the dedicated "no ZK" error code.
assertEquals(ZKFailoverController.ERR_CODE_NO_ZK,runFC(cluster.getService(1)));
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test(timeout=15000) public void testGracefulFailoverFailBecomingActive() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Inject a failure so service 1 cannot transition to active.
cluster.setFailToBecomeActive(1,true);
try {
cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover();
fail("Did not fail to graceful failover when target failed " + "to become active!");
}
catch ( ServiceFailedException sfe) {
// The failure must name the target service and carry the injected cause.
GenericTestUtils.assertExceptionContains("Couldn't make " + cluster.getService(1) + " active",sfe);
GenericTestUtils.assertExceptionContains("injected failure",sfe);
}
// A failed graceful failover must not have fenced either node...
assertEquals(0,cluster.getService(0).fenceCount);
assertEquals(0,cluster.getService(1).fenceCount);
// ...and the original node should end up holding the active lock again.
cluster.waitForActiveLockHolder(0);
}
finally {
cluster.stop();
}
}
InternalCallVerifierEqualityVerifier
/**
 * Test that, when ZooKeeper fails, the system remains in its
 * current state, without triggering any failovers, and without
 * causing the active node to enter standby state.
 */
@Test(timeout=15000) public void testZooKeeperFailure() throws Exception {
try {
cluster.start();
// Remember the ZK session IDs so we can verify they survive the outage.
long session0=cluster.getElector(0).getZKSessionIdForTests();
long session1=cluster.getElector(1).getZKSessionIdForTests();
LOG.info("====== Stopping ZK server");
stopServer();
waitForServerDown(hostPort,CONNECTION_TIMEOUT);
LOG.info("====== Waiting for services to enter NEUTRAL mode");
cluster.waitForElectorState(0,ActiveStandbyElector.State.NEUTRAL);
cluster.waitForElectorState(1,ActiveStandbyElector.State.NEUTRAL);
LOG.info("====== Checking that the services didn't change HA state");
assertEquals(HAServiceState.ACTIVE,cluster.getService(0).state);
assertEquals(HAServiceState.STANDBY,cluster.getService(1).state);
LOG.info("====== Restarting server");
startServer();
waitForServerUp(hostPort,CONNECTION_TIMEOUT);
// After ZK comes back, both sides regain their previous roles.
cluster.waitForElectorState(0,ActiveStandbyElector.State.ACTIVE);
cluster.waitForElectorState(1,ActiveStandbyElector.State.STANDBY);
cluster.waitForHAState(0,HAServiceState.ACTIVE);
cluster.waitForHAState(1,HAServiceState.STANDBY);
// The electors reconnected using the same ZK sessions (no session expiry).
assertEquals(session0,cluster.getElector(0).getZKSessionIdForTests());
assertEquals(session1,cluster.getElector(1).getZKSessionIdForTests());
}
finally {
cluster.stop();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the ZKFC can gracefully cede its active status.
 */
@Test(timeout=15000) public void testCedeActive() throws Exception {
try {
cluster.start();
DummyZKFC zkfc=cluster.getZkfc(0);
assertEquals(ActiveStandbyElector.State.ACTIVE,zkfc.getElectorForTests().getStateForTests());
ZKFCProtocol proxy=zkfc.getLocalTarget().getZKFCProxy(conf,5000);
long st=Time.now();
// Cede active for 3 seconds; the RPC itself must return quickly (< 1s),
// i.e. it must not block for the cede period.
proxy.cedeActive(3000);
long et=Time.now();
assertTrue("RPC to cedeActive took " + (et - st) + " ms",et - st < 1000);
// Immediately after ceding, the elector has quit the election entirely.
assertEquals(ActiveStandbyElector.State.INIT,zkfc.getElectorForTests().getStateForTests());
cluster.waitForElectorState(0,ActiveStandbyElector.State.STANDBY);
long et2=Time.now();
// Rejoining must have waited out (roughly) the full 3-second cede period.
assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) + "ms before rejoining.",et2 - et > 2800);
}
finally {
cluster.stop();
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that, if ACLs are specified in the configuration, that
 * it sets the ACLs when formatting the parent node.
 */
@Test(timeout=15000) public void testFormatSetsAcls() throws Exception {
// Format the ZK directory; the parent znode should be created with ACLs.
DummyHAService svc=cluster.getService(1);
assertEquals(0,runFC(svc,"-formatZK"));
// A client that has not authenticated must be denied access to the znode.
ZooKeeper otherClient=createClient();
try {
Stat stat=new Stat();
otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,false,stat);
fail("Was able to read data without authenticating!");
}
catch ( KeeperException.NoAuthException nae) {
// expected: the znode's ACLs deny unauthenticated reads
}
finally {
// Previously leaked: close the extra ZK client so its session does not
// linger into other tests.
otherClient.close();
}
}
Class: org.apache.hadoop.hdfs.TestAbandonBlock
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAbandonBlock() throws IOException {
String src=FILE_NAME_PREFIX + "foo";
// Create a file and write enough data for a block to be allocated.
FSDataOutputStream fout=fs.create(new Path(src),true,4096,(short)1,512L);
for (int i=0; i < 1024; i++) {
fout.write(123);
}
fout.hflush();
long fileId=((DFSOutputStream)fout.getWrappedStream()).getFileId();
DFSClient dfsclient=DFSClientAdapter.getDFSClient(fs);
LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(src,0,Integer.MAX_VALUE);
int blockCountBefore=blocks.locatedBlockCount();
LocatedBlock lastBlock=blocks.getLastLocatedBlock();
// Abandon the last block twice; the second call must be a harmless no-op
// (abandonBlock is expected to be idempotent).
dfsclient.getNamenode().abandonBlock(lastBlock.getBlock(),fileId,src,dfsclient.clientName);
dfsclient.getNamenode().abandonBlock(lastBlock.getBlock(),fileId,src,dfsclient.clientName);
fout.close();
// Restart the NameNode and confirm exactly one block is gone.
cluster.restartNameNode();
blocks=dfsclient.getNamenode().getBlockLocations(src,0,Integer.MAX_VALUE);
Assert.assertEquals("Blocks " + lastBlock + " has not been abandoned.",blockCountBefore,blocks.locatedBlockCount() + 1);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception. This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open. If not, we should purge the replica to avoid giving
 * it out to any future readers.
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout=120000) public void testPurgingClosedReplicas() throws Exception {
BlockReaderTestUtil.enableBlockReaderFactoryTracing();
// Count every short-circuit replica the factory is asked to create.
final AtomicInteger replicasCreated=new AtomicInteger(0);
final AtomicBoolean testFailed=new AtomicBoolean(false);
// Force short-circuit reads: TCP fallback is disabled for this test.
DFSInputStream.tcpReadsDisabledForTesting=true;
BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){
@Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
replicasCreated.incrementAndGet();
return null;
}
}
;
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testPurgingClosedReplicas",sockDir);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4095;
final int SEED=0xFADE0;
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf);
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// Semaphore used to rendezvous the reader thread with the interrupting
// loop in the main thread below.
final Semaphore sem=new Semaphore(0);
final List locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks();
final LocatedBlock lblock=locatedBlocks.get(0);
final byte[] buf=new byte[TEST_FILE_LEN];
// Reader loop: read the block repeatedly until an interrupt closes the
// underlying channel mid-read.
Runnable readerRunnable=new Runnable(){
@Override public void run(){
try {
while (true) {
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
sem.release();
try {
blockReader.readAll(buf,0,TEST_FILE_LEN);
}
finally {
sem.acquireUninterruptibly();
}
}
catch ( ClosedByInterruptException e) {
// The interrupt landed during a read and closed the channel:
// this is the expected exit path of the loop.
LOG.info("got the expected ClosedByInterruptException",e);
sem.release();
break;
}
finally {
if (blockReader != null) blockReader.close();
}
LOG.info("read another " + TEST_FILE_LEN + " bytes.");
}
}
catch ( Throwable t) {
LOG.error("getBlockReader failure",t);
testFailed.set(true);
sem.release();
}
}
}
;
Thread thread=new Thread(readerRunnable);
thread.start();
// Keep interrupting the reader until one interrupt lands during a read.
while (thread.isAlive()) {
sem.acquireUninterruptibly();
thread.interrupt();
sem.release();
}
Assert.assertFalse(testFailed.get());
// The closed replica must have been purged from the cache: a fresh read
// should succeed and should have forced creation of a SECOND replica.
BlockReader blockReader=null;
try {
blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN);
blockReader.readFully(buf,0,TEST_FILE_LEN);
}
finally {
if (blockReader != null) blockReader.close();
}
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(buf,expected));
Assert.assertEquals(2,replicasCreated.get());
dfs.close();
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test public void testShortCircuitReadFromServerWithoutShm() throws Exception {
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration clientConf=createShortCircuitConf("testShortCircuitReadFromServerWithoutShm",sockDir);
Configuration serverConf=new Configuration(clientConf);
// Server side only: disable the shared-memory watcher, i.e. no shm support.
serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0);
DFSInputStream.tcpReadsDisabledForTesting=true;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
try {
cluster.waitActive();
// Use a dedicated client context so cache state is isolated to this test.
clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromServerWithoutShm_clientContext");
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf);
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADEC;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// The read must still succeed, falling back to non-shm short-circuit.
byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
// The shm manager must have marked this datanode as shm-disabled, with
// no full or not-full shm segments allocated for it.
final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(1,info.size());
PerDatanodeVisitorInfo vinfo=info.get(datanode);
Assert.assertTrue(vinfo.disabled);
Assert.assertEquals(0,vinfo.full.size());
Assert.assertEquals(0,vinfo.notFull.size());
}
}
);
}
finally {
// Previously leaked: shut the cluster down and remove the socket
// directory even if an assertion above fails.
cluster.shutdown();
sockDir.close();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 */
@Test public void testShortCircuitReadFromClientWithoutShm() throws Exception {
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration clientConf=createShortCircuitConf("testShortCircuitReadWithoutShm",sockDir);
Configuration serverConf=new Configuration(clientConf);
DFSInputStream.tcpReadsDisabledForTesting=true;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
try {
cluster.waitActive();
// Client side only: disable the shared-memory watcher (no shm support)
// and isolate cache state in a dedicated client context.
clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0);
clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromClientWithoutShm_clientContext");
final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf);
final String TEST_FILE="/test_file";
final int TEST_FILE_LEN=4000;
final int SEED=0xFADEC;
DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
// The read must still succeed against the shm-capable server.
byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
// With shm disabled on the client, no shm manager should exist at all.
final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
Assert.assertEquals(null,cache.getDfsClientShmManager());
}
finally {
// Previously leaked: shut the cluster down and remove the socket
// directory even if an assertion above fails.
cluster.shutdown();
sockDir.close();
}
}
AssumptionSetterEqualityVerifierHybridVerifier
/**
 * Sanity-check that a cluster configured with both the old and new
 * short-circuit settings can still round-trip file contents intact.
 */
@Test public void testBothOldAndNewShortCircuitConfigured() throws Exception {
final short REPL_FACTOR=1;
final int FILE_LENGTH=512;
// Short-circuit reads require domain-socket support on this platform.
Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
TemporarySocketDirectory socketDir=new TemporarySocketDirectory();
HdfsConfiguration conf=getConfiguration(socketDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
socketDir.close();
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/foo");
// Write a small deterministic byte pattern...
byte orig[]=new byte[FILE_LENGTH];
for (int i=0; i < orig.length; i++) {
orig[i]=(byte)(i % 10);
}
FSDataOutputStream fos=fs.create(path,(short)1);
fos.write(orig);
fos.close();
DFSTestUtil.waitReplication(fs,path,REPL_FACTOR);
// ...and read it back, verifying the bytes survive the round trip.
FSDataInputStream fis=cluster.getFileSystem().open(path);
byte buf[]=new byte[FILE_LENGTH];
IOUtils.readFully(fis,buf,0,FILE_LENGTH);
fis.close();
// Removed a dead `Arrays.equals(orig,buf);` statement whose result was
// discarded — assertArrayEquals already performs this check.
Assert.assertArrayEquals(orig,buf);
}
finally {
// Previously not in a finally block: always shut the cluster down.
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that, in the case of an error, the position and limit of a ByteBuffer
 * are left unchanged. This is not mandated by ByteBufferReadable, but clients
 * of this class might immediately issue a retry on failure, so it's polite.
 */
@Test public void testStablePositionAfterCorruptRead() throws Exception {
final short REPL_FACTOR=1;
final long FILE_LENGTH=512L;
HdfsConfiguration conf=getConfiguration(null);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/corrupted");
DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L);
DFSTestUtil.waitReplication(fs,path,REPL_FACTOR);
// Corrupt every replica so any read is guaranteed to hit a checksum error.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path);
int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted);
// Case 1: fresh buffer (position 0, limit == capacity).
FSDataInputStream dis=cluster.getFileSystem().open(path);
ByteBuffer buf=ByteBuffer.allocateDirect((int)FILE_LENGTH);
boolean sawException=false;
try {
dis.read(buf);
}
catch ( ChecksumException ex) {
sawException=true;
}
assertTrue(sawException);
// Position and limit must be untouched by the failed read.
assertEquals(0,buf.position());
assertEquals(buf.capacity(),buf.limit());
// Case 2: buffer with a non-trivial position and limit.
dis=cluster.getFileSystem().open(path);
buf.position(3);
buf.limit(25);
sawException=false;
try {
dis.read(buf);
}
catch ( ChecksumException ex) {
sawException=true;
}
assertTrue(sawException);
assertEquals(3,buf.position());
assertEquals(25,buf.limit());
cluster.shutdown();
}
InternalCallVerifierEqualityVerifier
/**
 * Test recovery on restart OOB message. It also tests the delivery of
 * OOB ack originating from the primary datanode. Since there is only
 * one node in the cluster, failure of restart-recovery will fail the
 * test.
 */
@Test public void testPipelineRecoveryOnOOB() throws Exception {
Configuration conf=new HdfsConfiguration();
// Allow clients up to 15s to wait out a datanode restart.
conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,"15");
MiniDFSCluster cluster=null;
try {
int numDataNodes=1;
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
FileSystem fileSys=cluster.getFileSystem();
Path file=new Path("dataprotocol2.dat");
DFSTestUtil.createFile(fileSys,file,10240L,(short)1,0L);
// Open an append pipeline and leave it mid-write.
DFSOutputStream out=(DFSOutputStream)(fileSys.append(file).getWrappedStream());
out.write(1);
out.hflush();
// Issue a restart-for-upgrade shutdown of the sole datanode via dfsadmin;
// this is what should trigger the OOB restart ack up the pipeline.
DFSAdmin dfsadmin=new DFSAdmin(conf);
DataNode dn=cluster.getDataNodes().get(0);
final String dnAddr=dn.getDatanodeId().getIpcAddr(false);
final String[] args1={"-shutdownDatanode",dnAddr,"upgrade"};
Assert.assertEquals(0,dfsadmin.run(args1));
// Give the shutdown a moment to take effect before restarting.
Thread.sleep(4000);
cluster.restartDataNode(0,true);
// close() succeeds only if pipeline recovery worked across the restart.
out.close();
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifierEqualityVerifier
/**
 * Read a file served entirely from one DN. Seek around and read from
 * different offsets. And verify that they all use the same socket.
 * @throws Exception
 */
@Test public void testReadFromOneDN() throws Exception {
HdfsConfiguration configuration=new HdfsConfiguration();
final String contextName="testReadFromOneDNContext";
configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,contextName);
// Huge socket timeout so cached connections never expire mid-test.
configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,100000000L);
BlockReaderTestUtil util=new BlockReaderTestUtil(1,configuration);
final Path testFile=new Path("/testConnCache.dat");
byte authenticData[]=util.writeFile(testFile,FILE_SIZE / 1024);
DFSClient client=new DFSClient(new InetSocketAddress("localhost",util.getCluster().getNameNodePort()),util.getConf());
// NOTE(review): the value is never read, but ClientContext.get() presumably
// registers the named context consulted by getFromConf() in the final
// assertion — confirm before removing this line.
ClientContext cacheContext=ClientContext.get(contextName,client.getConf());
DFSInputStream in=client.open(testFile.toString());
LOG.info("opened " + testFile.toString());
byte[] dataBuf=new byte[BLOCK_SIZE];
// Positional reads at assorted offsets (including -1 and a half-length
// read); each is verified against authenticData by the pread helper.
pread(in,0,dataBuf,0,dataBuf.length,authenticData);
pread(in,FILE_SIZE - dataBuf.length,dataBuf,0,dataBuf.length,authenticData);
pread(in,1024,dataBuf,0,dataBuf.length,authenticData);
pread(in,-1,dataBuf,0,dataBuf.length,authenticData);
pread(in,64,dataBuf,0,dataBuf.length / 2,authenticData);
in.close();
client.close();
// Exactly one cached peer: every read reused the same socket.
Assert.assertEquals(1,ClientContext.getFromConf(configuration).getPeerCache().size());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test public void testDfsClientFailover() throws IOException, URISyntaxException {
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
DFSTestUtil.createFile(fs,TEST_FILE,FILE_LENGTH_TO_VERIFY,(short)1,1L);
// Fixed: arguments were in (actual, expected) order, which produces
// misleading JUnit failure messages.
assertEquals(FILE_LENGTH_TO_VERIFY,fs.getFileStatus(TEST_FILE).getLen());
// Kill the active NN and promote the standby; the client must fail over
// transparently and still see the file.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
assertEquals(FILE_LENGTH_TO_VERIFY,fs.getFileStatus(TEST_FILE).getLen());
// A logical URI that explicitly includes the default port must resolve too.
Path withPort=new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster) + ":"+ NameNode.DEFAULT_PORT+ "/"+ TEST_FILE.toUri().getPath());
FileSystem fs2=withPort.getFileSystem(fs.getConf());
assertTrue(fs2.exists(withPort));
fs.close();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that getAdditionalBlock() and close() are idempotent. This allows
 * a client to safely retry a call and still produce a correct
 * file. See HDFS-3031.
 */
@Test public void testIdempotentAllocateBlockAndClose() throws Exception {
final String src="/testIdempotentAllocateBlock";
Path file=new Path(src);
// Small block size so the write below spans several blocks.
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,4096);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Spy on the NN RPC so each addBlock()/complete() is invoked twice,
// simulating a client retrying the same RPC.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
doAnswer(new Answer(){
@Override public LocatedBlock answer( InvocationOnMock invocation) throws Throwable {
// First real call: the returned block must be the file's last block.
LocatedBlock ret=(LocatedBlock)invocation.callRealMethod();
LocatedBlocks lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE);
int blockCount=lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(),ret.getBlock());
// Retried call: must return the same last block without allocating
// an additional one (block count unchanged).
LocatedBlock ret2=(LocatedBlock)invocation.callRealMethod();
lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE);
int blockCount2=lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(),ret2.getBlock());
assertEquals(blockCount,blockCount2);
return ret2;
}
}
).when(spyNN).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any());
doAnswer(new Answer(){
@Override public Boolean answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Called complete(: " + Joiner.on(",").join(invocation.getArguments()) + ")");
// Only fake a retry once the real complete() has succeeded.
if (!(Boolean)invocation.callRealMethod()) {
LOG.info("Complete call returned false, not faking a retry RPC");
return false;
}
// The retried complete() must also succeed (idempotence).
try {
boolean ret=(Boolean)invocation.callRealMethod();
LOG.info("Complete call returned true, faked second RPC. " + "Returned: " + ret);
return ret;
}
catch ( Throwable t) {
LOG.error("Idempotent retry threw exception",t);
throw t;
}
}
}
).when(spyNN).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong());
// Write a multi-block file through the spying client, then verify contents.
OutputStream stm=client.create(file.toString(),true);
try {
AppendTestUtil.write(stm,0,10000);
stm.close();
stm=null;
}
finally {
IOUtils.cleanup(LOG,stm);
}
// Ensure the doubled-up RPC paths above actually ran.
Mockito.verify(spyNN,Mockito.atLeastOnce()).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any());
Mockito.verify(spyNN,Mockito.atLeastOnce()).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong());
AppendTestUtil.check(fs,file,10000);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test that checksum failures are recovered from by the next read on the same
 * DFSInputStream. Corruption information is not persisted from read call to
 * read call, so the client should expect consecutive calls to behave the same
 * way. See HDFS-3067.
 */
@Test public void testRetryOnChecksumFailure() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
final short REPL_FACTOR=1;
final long FILE_LENGTH=512L;
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/corrupted");
DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L);
DFSTestUtil.waitReplication(fs,path,REPL_FACTOR);
// Corrupt the single replica so every read attempt hits a checksum error.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path);
int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block);
assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted);
InetSocketAddress nnAddr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(nnAddr,conf);
DFSInputStream dis=client.open(path.toString());
byte[] arr=new byte[(int)FILE_LENGTH];
// Two consecutive reads on the SAME stream must both fail identically:
// corruption info must not be carried over between read calls.
// (fail() throws AssertionError, so the catch (Exception) below does not
// swallow it.)
for (int i=0; i < 2; ++i) {
try {
dis.read(arr,0,(int)FILE_LENGTH);
fail("Expected ChecksumException not thrown");
}
catch ( Exception ex) {
GenericTestUtils.assertExceptionContains("Checksum error",ex);
}
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testGetFileChecksum() throws Exception {
final String f="/testGetFileChecksum";
final Path p=new Path(f);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
// 1 MB file, replicated 3 ways.
DFSTestUtil.createFile(fs,p,1L << 20,(short)3,20100402L);
final FileChecksum cs1=fs.getFileChecksum(p);
assertTrue(cs1 != null);
// Stop the datanode holding the first location of the first block...
final List locatedblocks=DFSClient.callGetBlockLocations(cluster.getNameNodeRpc(),f,0,Long.MAX_VALUE).getLocatedBlocks();
final DatanodeInfo first=locatedblocks.get(0).getLocations()[0];
cluster.stopDataNode(first.getXferAddr());
// ...the checksum must still be computable via the remaining replicas,
// and must equal the original.
final FileChecksum cs2=fs.getFileChecksum(p);
assertEquals(cs1,cs2);
}
finally {
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestDFSOutputStream
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The close() method of DFSOutputStream should never throw the same exception
 * twice. See HDFS-5335 for details.
 */
@Test public void testCloseTwice() throws IOException {
DistributedFileSystem fs=cluster.getFileSystem();
FSDataOutputStream os=fs.create(new Path("/test"));
DFSOutputStream dos=(DFSOutputStream)Whitebox.getInternalState(os,"wrappedStream");
@SuppressWarnings("unchecked") AtomicReference ex=(AtomicReference)Whitebox.getInternalState(dos,"lastException");
Assert.assertEquals(null,ex.get());
dos.close();
// Inject an exception; the next close() must surface it exactly once.
IOException dummy=new IOException("dummy");
ex.set(dummy);
try {
dos.close();
// Fixed: previously the test passed silently if close() did not throw.
Assert.fail("Expected the injected IOException from close()");
}
catch ( IOException e) {
// Fixed: was assertEquals(e,dummy) — (expected, actual) order gives a
// sensible failure message.
Assert.assertEquals(dummy,e);
}
// The stored exception must be cleared, so a further close() is a no-op.
Assert.assertEquals(null,ex.get());
dos.close();
}
Class: org.apache.hadoop.hdfs.TestDFSRemove
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testRemove() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs=cluster.getFileSystem();
assertTrue(fs.mkdirs(dir));
// Baseline disk usage before any test files exist.
long dfsUsedStart=getTotalDfsUsed(cluster);
{
// Create 100 files, record peak usage, then delete them all.
final int fileCount=100;
for (int i=0; i < fileCount; i++) {
Path a=new Path(dir,"a" + i);
createFile(fs,a);
}
long dfsUsedMax=getTotalDfsUsed(cluster);
for (int i=0; i < fileCount; i++) {
Path a=new Path(dir,"a" + i);
fs.delete(a,false);
}
// Wait a few heartbeat intervals so the datanodes actually reclaim
// the deleted blocks' space.
Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
long dfsUsedFinal=getTotalDfsUsed(cluster);
// Usage must return exactly to the pre-test baseline.
assertEquals("All blocks should be gone. start=" + dfsUsedStart + " max="+ dfsUsedMax+ " final="+ dfsUsedFinal,dfsUsedStart,dfsUsedFinal);
}
fs.delete(dir,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to make sure that user namespace xattrs can be set only if path has
 * access and for sticky directories, only owner/privileged user can write.
 * Trusted namespace xattrs can be set only with privileged users.
 * As user1: Create a directory (/foo) as user1, chown it to user1 (and
 * user1's group), grant rwx to "other".
 * As user2: Set an xattr (should pass with path access).
 * As user1: Set an xattr (should pass).
 * As user2: Read the xattr (should pass). Remove the xattr (should pass with
 * path access).
 * As user1: Read the xattr (should pass). Remove the xattr (should pass).
 * As user1: Change permissions only to owner
 * As User2: Set an Xattr (Should fail set with no path access) Remove an
 * Xattr (Should fail with no path access)
 * As SuperUser: Set an Xattr with Trusted (Should pass)
 */
@Test(timeout=30000) public void testSetXAttrPermissionAsDifferentOwner() throws Exception {
final String USER1="user1";
final String GROUP1="supergroup";
final UserGroupInformation user1=UserGroupInformation.createUserForTesting(USER1,new String[]{GROUP1});
final UserGroupInformation user2=UserGroupInformation.createUserForTesting("user2",new String[]{"mygroup2"});
final UserGroupInformation SUPERUSER=UserGroupInformation.getCurrentUser();
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
// Give user1 ownership of the root so it can create /foo below.
fs.setOwner(new Path("/"),USER1,GROUP1);
// Capture stderr so shell error output can be inspected via `out`.
bak=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
// user1: create /foo.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-mkdir","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// user1: grant rwx to "other" (mode 707) so user2 has path access.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-chmod","707","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// user2: set a user-namespace xattr — allowed via path access.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// user1 (owner): set the same xattr — allowed.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// user2: read and then remove the xattr — both allowed via path access.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
// NOTE(review): this doAs body is empty and performs no checks — it looks
// like a leftover placeholder for the "As user1: Read/Remove" step in the
// javadoc; confirm intent before removing.
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
return null;
}
}
);
// user1: restrict /foo to owner-only (mode 700).
user1.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final int ret=ToolRunner.run(fshell,new String[]{"-chmod","700","/foo"});
assertEquals("Return should be 0",0,ret);
out.reset();
return null;
}
}
);
// user2: setting an xattr must now fail with "Permission denied".
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a2","/foo"});
assertEquals("Returned should be 1",1,ret);
final String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// user2: removing an xattr must fail the same way.
user2.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a2","/foo"});
assertEquals("Returned should be 1",1,ret);
final String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// superuser: trusted-namespace xattrs are allowed for privileged users.
SUPERUSER.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","trusted.a3","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
return null;
}
}
);
}
finally {
// Restore the real stderr and tear down the cluster.
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises FsShell commands against fully-qualified URIs that point at a
 * second ("remote") MiniDFSCluster, then checks that scheme-only hdfs:///
 * paths resolve against the shell's default filesystem.
 */
@Test(timeout=30000) public void testURIPaths() throws Exception {
Configuration srcConf=new HdfsConfiguration();
Configuration dstConf=new HdfsConfiguration();
MiniDFSCluster srcCluster=null;
MiniDFSCluster dstCluster=null;
// Separate base dir so the two MiniDFSClusters do not collide on disk.
File bak=new File(PathUtils.getTestDir(getClass()),"dfs_tmp_uri");
bak.mkdirs();
try {
srcCluster=new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,bak.getAbsolutePath());
dstCluster=new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
FileSystem srcFs=srcCluster.getFileSystem();
FileSystem dstFs=dstCluster.getFileSystem();
// The shell is configured with srcConf, so all access to the dst cluster
// below has to go through fully-qualified URIs.
FsShell shell=new FsShell();
shell.setConf(srcConf);
String[] argv=new String[2];
argv[0]="-ls";
argv[1]=dstFs.getUri().toString() + "/";
int ret=ToolRunner.run(shell,argv);
assertEquals("ls works on remote uri ",0,ret);
dstFs.mkdirs(new Path("/hadoopdir"));
argv=new String[2];
argv[0]="-rmr";
argv[1]=dstFs.getUri().toString() + "/hadoopdir";
ret=ToolRunner.run(shell,argv);
assertEquals("-rmr works on remote uri " + argv[1],0,ret);
argv[0]="-du";
argv[1]=dstFs.getUri().toString() + "/";
ret=ToolRunner.run(shell,argv);
assertEquals("du works on remote uri ",0,ret);
// Copy a local file to the remote cluster, then between the two clusters.
File furi=new File(TEST_ROOT_DIR,"furi");
createLocalFile(furi);
argv=new String[3];
argv[0]="-put";
argv[1]=furi.toURI().toString();
argv[2]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" put is working ",0,ret);
argv[0]="-cp";
argv[1]=dstFs.getUri().toString() + "/furi";
argv[2]=srcFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cp is working ",0,ret);
assertTrue(srcFs.exists(new Path("/furi")));
argv=new String[2];
argv[0]="-cat";
argv[1]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cat is working ",0,ret);
dstFs.delete(new Path("/furi"),true);
dstFs.delete(new Path("/hadoopdir"),true);
// Recursive chgrp/chown through a remote URI (globbed and root paths).
String file="/tmp/chownTest";
Path path=new Path(file);
Path parent=new Path("/tmp");
Path root=new Path("/");
TestDFSShell.writeFile(dstFs,path);
runCmd(shell,"-chgrp","-R","herbivores",dstFs.getUri().toString() + "/*");
confirmOwner(null,"herbivores",dstFs,parent,path);
runCmd(shell,"-chown","-R",":reptiles",dstFs.getUri().toString() + "/");
confirmOwner(null,"reptiles",dstFs,root,parent,path);
// hdfs:/// (no authority) resolves against the shell's default filesystem,
// where /furi was copied above.
argv[0]="-cat";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" default works for cat",0,ret);
argv[0]="-ls";
argv[1]="hdfs:///";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for ls ",0,ret);
argv[0]="-rmr";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for rm/rmr",0,ret);
}
finally {
if (null != srcCluster) {
srcCluster.shutdown();
}
if (null != dstCluster) {
dstCluster.shutdown();
}
}
}
InternalCallVerifierEqualityVerifier
/**
 * default setting is file:// which is not a DFS
 * so DFSAdmin should throw and catch InvalidArgumentException
 * and return -1 exit code.
 * @throws Exception
 */
@Test(timeout=30000) public void testInvalidShell() throws Exception {
Configuration conf=new Configuration();
DFSAdmin admin=new DFSAdmin();
admin.setConf(conf);
int res=admin.run(new String[]{"-refreshNodes"});
// JUnit assertEquals takes (message, expected, actual); the original call
// passed (message, actual, expected), which garbles the failure output.
assertEquals("expected to fail -1",-1,res);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies setfattr/getfattr permission enforcement: a non-owner cannot set
 * an xattr on a 0700 directory, and with 0750 can neither read nor remove
 * one, while the owner can always set it.
 */
@Test(timeout=30000) public void testSetXAttrPermission() throws Exception {
UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
MiniDFSCluster cluster=null;
PrintStream bak=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path p=new Path("/foo");
fs.mkdirs(p);
// Capture stderr so the "Permission denied" message can be asserted on.
bak=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
fs.setPermission(p,new FsPermission((short)0700));
// Non-owner: setting an xattr on a 0700 directory must be denied.
// (Raw PrivilegedExceptionAction replaced with the parameterized form.)
user.doAs(new PrivilegedExceptionAction<Object>(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 1",1,ret);
String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
// The owner (current test user) can set the xattr.
int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
fs.setPermission(p,new FsPermission((short)0750));
// Non-owner with 0750: both reading and removing the xattr must be denied.
user.doAs(new PrivilegedExceptionAction<Object>(){
@Override public Object run() throws Exception {
int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
assertEquals("Returned should be 1",1,ret);
String str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"});
assertEquals("Returned should be 1",1,ret);
str=out.toString();
assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1);
out.reset();
return null;
}
}
);
}
finally {
if (bak != null) {
System.setErr(bak);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierEqualityVerifier
/**
 * Verifies that -put, -copyFromLocal and -cp refuse to overwrite an existing
 * destination file unless the -f (force) option is supplied.
 */
@Test(timeout=30000) public void testCopyCommandsWithForceOption() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final File localFile=new File(TEST_ROOT_DIR,"testFileForPut");
final String localFilePath=new Path(localFile.getAbsolutePath()).toUri().toString();
final String testdir="/tmp/TestDFSShell-testCopyCommandsWithForceOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
localFile.createNewFile();
// Pre-create the destination so every copy below targets an existing file.
writeFile(fs,new Path(testdir,"testFileForPut"));
shell=new FsShell();
// Each command must succeed with -f and fail without it.
int exitCode=ToolRunner.run(shell,new String[]{"-put","-f",localFilePath,testdir});
assertEquals("put -f is not working",SUCCESS,exitCode);
exitCode=ToolRunner.run(shell,new String[]{"-put",localFilePath,testdir});
assertEquals("put command itself is able to overwrite the file",ERROR,exitCode);
exitCode=ToolRunner.run(shell,new String[]{"-copyFromLocal","-f",localFilePath,testdir});
assertEquals("copyFromLocal -f is not working",SUCCESS,exitCode);
exitCode=ToolRunner.run(shell,new String[]{"-copyFromLocal",localFilePath,testdir});
assertEquals("copyFromLocal command itself is able to overwrite the file",ERROR,exitCode);
exitCode=ToolRunner.run(shell,new String[]{"-cp","-f",localFilePath,testdir});
assertEquals("cp -f is not working",SUCCESS,exitCode);
exitCode=ToolRunner.run(shell,new String[]{"-cp",localFilePath,testdir});
assertEquals("cp command itself is able to overwrite the file",ERROR,exitCode);
}
finally {
if (null != shell) {
shell.close();
}
if (localFile.exists()) {
localFile.delete();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the attribute subsets copied by the "cp" preserve flags when the
 * copy source is a directory:
 * -p / -ptop: timestamps, ownership, permission;
 * -ptopx: additionally xattrs;
 * -ptopa / -ptoa: additionally ACLs (but not xattrs).
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
Path srcDir=new Path(hdfsTestDir,"srcDir");
fs.mkdirs(srcDir);
fs.setAcl(srcDir,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
fs.setPermission(srcDir,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
Path srcFile=new Path(srcDir,"srcFile");
fs.create(srcFile).close();
// Snapshot the source attributes that the -p flags should preserve.
FileStatus status=fs.getFileStatus(srcDir);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
fs.setXAttr(srcDir,USER_A1,USER_A1_VALUE);
fs.setXAttr(srcDir,TRUSTED_A1,TRUSTED_A1_VALUE);
shell=new FsShell(conf);
// -p: timestamps/ownership/permission only; no xattrs, no ACLs.
Path targetDir1=new Path(hdfsTestDir,"targetDir1");
String[] argv=new String[]{"-cp","-p",srcDir.toUri().toString(),targetDir1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp -p is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(targetDir1);
assertCopiedDirStatus(mtime,atime,owner,group,perm,targetStatus);
FsPermission targetPerm=targetStatus.getPermission();
Map<String, byte[]> xattrs=fs.getXAttrs(targetDir1);
assertTrue(xattrs.isEmpty());
List<?> acls=fs.getAclStatus(targetDir1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptop: same observable result as -p here.
Path targetDir2=new Path(hdfsTestDir,"targetDir2");
argv=new String[]{"-cp","-ptop",srcDir.toUri().toString(),targetDir2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptop is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir2);
assertCopiedDirStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(targetDir2);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopx: xattrs are preserved as well.
Path targetDir3=new Path(hdfsTestDir,"targetDir3");
argv=new String[]{"-cp","-ptopx",srcDir.toUri().toString(),targetDir3.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopx is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir3);
assertCopiedDirStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(targetDir3);
// assertEquals(expected, actual): the original call reversed the operands.
assertEquals(2,xattrs.size());
assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
acls=fs.getAclStatus(targetDir3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa: ACLs are preserved; xattrs are not.
Path targetDir4=new Path(hdfsTestDir,"targetDir4");
argv=new String[]{"-cp","-ptopa",srcDir.toUri().toString(),targetDir4.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir4);
assertCopiedDirStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(targetDir4);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir4));
// -ptoa: ACLs preserved, xattrs still not copied.
Path targetDir5=new Path(hdfsTestDir,"targetDir5");
argv=new String[]{"-cp","-ptoa",srcDir.toUri().toString(),targetDir5.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptoa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir5);
assertCopiedDirStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(targetDir5);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir5));
}
finally {
if (shell != null) {
shell.close();
}
if (fs != null) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
/**
 * Asserts that a directory copy target carries the timestamps, ownership and
 * permission recorded from the copy source.
 */
private static void assertCopiedDirStatus(long mtime,long atime,String owner,String group,FsPermission perm,FileStatus targetStatus){
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
assertEquals(perm,targetStatus.getPermission());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that "cp -p" preserves timestamps/ownership/permission (including
 * the sticky bit) but not ACLs, while "cp -ptopa" also preserves ACLs.
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
Path src=new Path(hdfsTestDir,"srcfile");
fs.create(src).close();
fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
// Sticky bit set explicitly so -p has to carry it over.
fs.setPermission(src,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
// Snapshot the source attributes that the preserve flags should carry.
FileStatus status=fs.getFileStatus(src);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
shell=new FsShell(conf);
// -p: status preserved, ACLs not copied.
Path target1=new Path(hdfsTestDir,"targetfile1");
String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(target1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
List<?> acls=fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa: ACLs preserved in addition to the status attributes.
Path target2=new Path(hdfsTestDir,"targetfile2");
argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
acls=fs.getAclStatus(target2).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target2));
}
finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies getfattr failure modes: an unauthorized user must not see the
 * xattr value in the output, and requesting a nonexistent xattr must print
 * an explanatory error.
 */
@Test(timeout=120000) public void testGetFAttrErrors() throws Exception {
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
MiniDFSCluster cluster=null;
PrintStream bakErr=null;
try {
final Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final FileSystem fs=cluster.getFileSystem();
final Path p=new Path("/foo");
fs.mkdirs(p);
// Capture stderr so command output can be inspected.
bakErr=System.err;
final FsShell fshell=new FsShell(conf);
final ByteArrayOutputStream out=new ByteArrayOutputStream();
System.setErr(new PrintStream(out));
fs.setPermission(p,new FsPermission((short)0700));
{
final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"});
assertEquals("Returned should be 0",0,ret);
out.reset();
}
// Unauthorized user: the xattr value must not leak into the output.
user.doAs(new PrivilegedExceptionAction<Object>(){
@Override public Object run() throws Exception {
// Exit code intentionally ignored (the original bound it to an unused
// local); only the command output is checked here.
ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"});
String str=out.toString();
assertTrue("xattr value was incorrectly returned",str.indexOf("1234") == -1);
out.reset();
return null;
}
}
);
// Nonexistent xattr: an explanatory error message must be printed.
{
ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.nonexistent","/foo"});
String str=out.toString();
assertTrue("xattr value was incorrectly returned",str.indexOf("getfattr: At least one of the attributes provided was not found") >= 0);
out.reset();
}
}
finally {
if (bakErr != null) {
System.setErr(bakErr);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the attribute subsets copied by the "cp" preserve flags when the
 * copy source is a file:
 * -p / -ptop: timestamps, ownership, permission;
 * -ptopx: additionally xattrs;
 * -ptopa / -ptoa: additionally ACLs (but not xattrs).
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
Path src=new Path(hdfsTestDir,"srcfile");
fs.create(src).close();
fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
// Snapshot the source attributes that the -p flags should preserve.
FileStatus status=fs.getFileStatus(src);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
fs.setXAttr(src,USER_A1,USER_A1_VALUE);
fs.setXAttr(src,TRUSTED_A1,TRUSTED_A1_VALUE);
shell=new FsShell(conf);
// -p: timestamps/ownership/permission only; no xattrs, no ACLs.
Path target1=new Path(hdfsTestDir,"targetfile1");
String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp -p is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(target1);
assertCopiedFileStatus(mtime,atime,owner,group,perm,targetStatus);
FsPermission targetPerm=targetStatus.getPermission();
Map<String, byte[]> xattrs=fs.getXAttrs(target1);
assertTrue(xattrs.isEmpty());
List<?> acls=fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptop: same observable result as -p here.
Path target2=new Path(hdfsTestDir,"targetfile2");
argv=new String[]{"-cp","-ptop",src.toUri().toString(),target2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptop is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target2);
assertCopiedFileStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(target2);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopx: xattrs are preserved as well.
Path target3=new Path(hdfsTestDir,"targetfile3");
argv=new String[]{"-cp","-ptopx",src.toUri().toString(),target3.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopx is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target3);
assertCopiedFileStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(target3);
// assertEquals(expected, actual): the original call reversed the operands.
assertEquals(2,xattrs.size());
assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
acls=fs.getAclStatus(target3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -ptopa: ACLs are preserved; xattrs are not.
Path target4=new Path(hdfsTestDir,"targetfile4");
argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target4.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target4);
assertCopiedFileStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(target4);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target4));
// -ptoa: ACLs preserved, xattrs still not copied.
Path target5=new Path(hdfsTestDir,"targetfile5");
argv=new String[]{"-cp","-ptoa",src.toUri().toString(),target5.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptoa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target5);
assertCopiedFileStatus(mtime,atime,owner,group,perm,targetStatus);
targetPerm=targetStatus.getPermission();
xattrs=fs.getXAttrs(target5);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target5));
}
finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
/**
 * Asserts that a file copy target carries the timestamps, ownership and
 * permission recorded from the copy source.
 */
private static void assertCopiedFileStatus(long mtime,long atime,String owner,String group,FsPermission perm,FileStatus targetStatus){
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
assertEquals(perm,targetStatus.getPermission());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This test ensures the appropriate response (successful or failure) from
 * a Datanode when the system is started with differing version combinations.
 *
 * For each 3-tuple in the cross product
 * ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
 * {currentNamespaceId,incorrectNamespaceId},
 * {pastFsscTime,currentFsscTime,futureFsscTime}):
 * 1. Startup Namenode with version file containing
 * (currentLayoutVersion,currentNamespaceId,currentFsscTime)
 * 2. Attempt to startup Datanode with version file containing
 * this iteration's version 3-tuple
 */
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
 * paths to test renaming on upgrade
 */
@Test public void testUpgradeFromRel2ReservedImage() throws Exception {
unpackStorage(HADOOP2_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
// Without rename overrides the upgrade must be rejected because the image
// contains reserved path components.
try {
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
// The original test silently passed if the build succeeded, which would
// mask a regression in reserved-path rejection.
throw new AssertionError("Upgrade should have failed on reserved path components");
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("reserved path component in this version",e);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
// With rename pairs configured, the upgrade succeeds and the reserved
// components are renamed.
try {
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/edits","/edits/.reserved","/edits/.user-snapshot","/edits/.user-snapshot/editsdir","/edits/.user-snapshot/editsdir/editscontents","/edits/.user-snapshot/editsdir/editsdir2","/image","/image/.reserved","/image/.user-snapshot","/image/.user-snapshot/imagedir","/image/.user-snapshot/imagedir/imagecontents","/image/.user-snapshot/imagedir/imagedir2","/.my-reserved","/.my-reserved/edits-touch","/.my-reserved/image-touch"};
// Verify immediately after upgrade (i == 0) and again after finalizing
// and restarting the NameNode (i == 1).
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
// Breadth-first walk collecting every path in the filesystem.
ArrayList<Path> toList=new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found=new ArrayList<String>();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
// assertEquals(expected, actual): the original call reversed the operands.
assertEquals("Found an unexpected path while listing filesystem",expected.length,found.size());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test upgrade from a branch-1.2 image with reserved paths
 */
@Test public void testUpgradeFromRel1ReservedImage() throws Exception {
unpackStorage(HADOOP1_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
// Rename reserved path components on upgrade so the image loads.
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/.my-reserved","/.user-snapshot","/.user-snapshot/.user-snapshot","/.user-snapshot/open","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot","/user","/user/andrew","/user/andrew/.user-snapshot"};
// Verify immediately after upgrade (i == 0) and again after finalizing
// and restarting the NameNode (i == 1).
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
// Breadth-first walk collecting every path in the filesystem.
ArrayList<Path> toList=new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found=new ArrayList<String>();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
// assertEquals(expected, actual): the original call reversed the operands.
assertEquals("Found an unexpected path while listing filesystem",expected.length,found.size());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test upgrade from a 0.23.11 image with reserved paths
 */
@Test public void testUpgradeFromRel023ReservedImage() throws Exception {
unpackStorage(HADOOP023_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT);
MiniDFSCluster cluster=null;
final Configuration conf=new Configuration();
try {
// Rename reserved path components on upgrade so the image loads.
FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved");
cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build();
DistributedFileSystem dfs=cluster.getFileSystem();
final String[] expected=new String[]{"/.user-snapshot","/dir1","/dir1/.user-snapshot","/dir2","/dir2/.user-snapshot"};
// Verify immediately after upgrade (i == 0) and again after finalizing
// and restarting the NameNode (i == 1).
for (int i=0; i < 2; i++) {
if (i == 1) {
cluster.finalizeCluster(conf);
cluster.restartNameNode(true);
}
// Breadth-first walk collecting every path in the filesystem.
ArrayList<Path> toList=new ArrayList<Path>();
toList.add(new Path("/"));
ArrayList<String> found=new ArrayList<String>();
while (!toList.isEmpty()) {
Path p=toList.remove(0);
FileStatus[] statuses=dfs.listStatus(p);
for ( FileStatus status : statuses) {
final String path=status.getPath().toUri().getPath();
System.out.println("Found path " + path);
found.add(path);
if (status.isDirectory()) {
toList.add(status.getPath());
}
}
}
for ( String s : expected) {
assertTrue("Did not find expected path " + s,found.contains(s));
}
// assertEquals(expected, actual): the original call reversed the operands.
assertEquals("Found an unexpected path while listing filesystem",expected.length,found.size());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test upgrade from 0.22 image with corrupt md5, make sure it
 * fails to upgrade
 */
@Test public void testUpgradeFromCorruptRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE,HADOOP_DFS_DIR_TXT);
// Corrupt the stored image checksum in both name directories.
File baseDir=new File(MiniDFSCluster.getBaseDirectory());
FSImageTestUtil.corruptVersionFile(new File(baseDir,"name1/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222");
FSImageTestUtil.corruptVersionFile(new File(baseDir,"name2/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222");
// Attach an appender so MD5-mismatch errors can be counted from the logs.
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
try {
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).numDataNodes(4));
fail("Upgrade did not fail with bad MD5");
}
catch ( IOException ioe) {
String msg=StringUtils.stringifyException(ioe);
if (!msg.contains("Failed to load an FSImage file")) {
throw ioe;
}
int md5failures=appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5",1,md5failures);
}
finally {
// The original test leaked the appender on the root logger, which can
// distort log-based assertions in tests that run afterwards.
logger.removeAppender(appender);
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA nor
 * Federation is enabled.
 * Regression test for HDFS-3351.
 */
@Test public void testConfModificationNoFederationOrHa(){
final HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,"localhost:1234");
// Before generic-key initialization, the default FS must not yet have been
// derived from the RPC address.
assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
// Neither a nameservice id nor a namenode id is configured here.
NameNode.initializeGenericKeys(conf,null,null);
assertEquals("hdfs://localhost:1234",conf.get(FS_DEFAULT_NAME_KEY));
}
InternalCallVerifierEqualityVerifier
/**
 * Test to ensure nameservice specific keys in the configuration are
 * copied to generic keys when the namenode starts.
 */
@Test public void testConfModificationFederationAndHa(){
final HdfsConfiguration conf=new HdfsConfiguration();
final String nameserviceId="ns1";
final String namenodeId="nn1";
conf.set(DFS_NAMESERVICES,nameserviceId);
conf.set(DFS_NAMESERVICE_ID,nameserviceId);
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nameserviceId,namenodeId);
// Seed each namenode-specific key with a suffixed variant whose value is
// the generic key name itself, making the copy trivial to verify.
for ( String genericKey : NameNode.NAMENODE_SPECIFIC_KEYS) {
conf.set(DFSUtil.addKeySuffixes(genericKey,nameserviceId,namenodeId),genericKey);
}
NameNode.initializeGenericKeys(conf,nameserviceId,namenodeId);
// Every generic key must now hold the value of its suffixed counterpart.
for ( String genericKey : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(genericKey,conf.get(genericKey));
}
}
APIUtilityVerifierEqualityVerifier
@Test public void testGetInfoServer() throws IOException, URISyntaxException {
  final HdfsConfiguration config = new HdfsConfiguration();
  // With no explicit addresses configured, the info server binds the wildcard
  // address on the scheme's default port.
  final URI httpsUri = DFSUtil.getInfoServer(null, config, "https");
  assertEquals(new URI("https", null, "0.0.0.0", DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsUri);
  final URI httpUri = DFSUtil.getInfoServer(null, config, "http");
  assertEquals(new URI("http", null, "0.0.0.0", DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpUri);
  // A concrete RPC address contributes its host while keeping the default port.
  final URI resolved = DFSUtil.getInfoServer(new InetSocketAddress("localhost", 8020), config, "http");
  assertEquals(URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT), resolved);
}
EqualityVerifier
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* nameserviceId for namenode is determined based on matching the address with
* local node's address
*/
@Test public void getNameNodeNameServiceId() {
  // setupAddress binds the namenode RPC key to the local node's address
  // under nameservice "nn1"; resolution must match by local address.
  final Configuration config = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(config));
}
APIUtilityVerifierEqualityVerifier
/**
 * The webhdfs address map for an HA nameservice must expose each
 * namenode's configured address keyed by nameservice and namenode id.
 */
@Test public void testGetHaNnHttpAddresses() throws IOException {
  final String LOGICAL_HOST_NAME = "ns1";
  final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
  final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
  Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
  // FIX: the nested generic parameters had been stripped ("Map>"), which does
  // not compile. Restore the nameservice -> namenode-id -> address map type.
  Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
  assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
  assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Tests to ensure default namenode is used as fallback
*/
/**
 * With only fs.defaultFS configured, the default namenode is returned as
 * the single service RPC address, keyed by a null nameservice/nn id.
 */
@Test public void testDefaultNamenode() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String hdfs_default = "hdfs://localhost:9999/";
  conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
  // FIX: restore the stripped generic parameters; the raw "Map>" form does
  // not compile.
  Map<String, Map<String, InetSocketAddress>> addrMap = DFSUtil.getNNServiceRpcAddresses(conf);
  assertEquals(1, addrMap.size());
  Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
  assertEquals(1, defaultNsMap.size());
  // Port comes from the fs.defaultFS URI.
  assertEquals(9999, defaultNsMap.get(null).getPort());
}
InternalCallVerifierEqualityVerifier
/**
 * Service RPC address resolution for an HA nameservice: falls back to the
 * client RPC address until a dedicated service RPC address is configured.
 */
@Test public void getNameNodeServiceAddr() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  // One nameservice with two namenodes. FIX: the nn1/nn2 host constants had
  // been copy-pasted to identical values, so the assertions below could not
  // distinguish the two namenodes; give each NN its own host.
  final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
  final String NS1_NN1_HOST_SVC = "ns1-nn1.example.com:8021";
  final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
  final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
  // Without a service RPC address configured, the client RPC address is used.
  assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
  assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
  assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
  // An unknown nameservice yields no address.
  assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
  // Once a dedicated service RPC address exists it takes precedence.
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST_SVC);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST_SVC);
  assertEquals(NS1_NN1_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
  assertEquals(NS1_NN2_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
  assertEquals("ns1", DFSUtil.getNamenodeNameServiceId(conf));
  assertEquals("ns1", DFSUtil.getSecondaryNameServiceId(conf));
}
EqualityVerifier
/**
* Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
* nameserviceId for backup node is determined based on matching the address
* with local node's address
*/
@Test public void getSecondaryNameServiceId() {
  // setupAddress registers the secondary-namenode HTTP key against the
  // local node's address under nameservice "nn1".
  final Configuration config = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
  assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(config));
}
InternalCallVerifierEqualityVerifier
/**
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
* nameserviceId from the configuration returned
*/
@Test public void getNameServiceId() {
  // An explicitly configured nameservice id is returned verbatim.
  final HdfsConfiguration config = new HdfsConfiguration();
  config.set(DFS_NAMESERVICE_ID, "nn1");
  assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(config));
}
EqualityVerifier
/**
* Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
* nameserviceId for backup node is determined based on matching the address
* with local node's address
*/
@Test public void getBackupNameServiceId() {
  // setupAddress binds the backup-node address key to the local node's
  // address under nameservice "nn1".
  final Configuration config = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
  assertEquals("nn1", DFSUtil.getBackupNameServiceId(config));
}
APIUtilityVerifierInternalCallVerifierAssumptionSetterEqualityVerifierConditionMatcherHybridVerifier
/**
 * Nameservice URIs built from a loopback fs.defaultFS must carry the
 * reverse-resolved hostname, not the raw IP literal.
 */
@Test(timeout=15000) public void testLocalhostReverseLookup() {
  // 127.0.0.1 -> localhost reverse lookup is unreliable on Windows; skip.
  Assume.assumeTrue(!Shell.WINDOWS);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
  // FIX: restore the stripped element type; iterating a raw Collection with
  // a typed URI loop variable does not compile.
  Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
  assertEquals(1, uris.size());
  for (URI uri : uris) {
    assertThat(uri.getHost(), not("127.0.0.1"));
  }
}
InternalCallVerifierEqualityVerifier
/**
* Test to ensure nameservice specific keys in the configuration are
* copied to generic keys when the namenode starts.
*/
@Test public void testConfModificationFederationOnly() {
  final String nameserviceId = "ns1";
  final HdfsConfiguration config = new HdfsConfiguration();
  config.set(DFS_NAMESERVICES, nameserviceId);
  config.set(DFS_NAMESERVICE_ID, nameserviceId);
  // Seed every NN-specific key under its nameservice-suffixed form, using
  // the generic key name itself as a sentinel value.
  for (String specificKey : NameNode.NAMENODE_SPECIFIC_KEYS) {
    config.set(DFSUtil.addKeySuffixes(specificKey, nameserviceId), specificKey);
  }
  // No namenode id: federation without HA.
  NameNode.initializeGenericKeys(config, nameserviceId, null);
  // Each suffixed value must now be readable via the plain generic key.
  for (String specificKey : NameNode.NAMENODE_SPECIFIC_KEYS) {
    assertEquals(specificKey, config.get(specificKey));
  }
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test conversion of LocatedBlock to BlockLocation
*/
/**
 * Conversion from LocatedBlocks to BlockLocation[] must preserve block
 * count and the per-block corrupt flag, and map an empty input to an
 * empty array.
 */
@Test public void testLocatedBlocks2Locations() {
  DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[1];
  ds[0] = d;
  // One healthy block (corrupt=false) and one marked corrupt (corrupt=true).
  ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
  ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
  // FIX: use the parameterized List type instead of the raw "List" the
  // original declared.
  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
  assertTrue("expected 2 blocks but got " + bs.length, bs.length == 2);
  int corruptCount = 0;
  for (BlockLocation b : bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }
  assertTrue("expected 1 corrupt files but got " + corruptCount, corruptCount == 1);
  // An empty LocatedBlocks maps to an empty BlockLocation array.
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}{@link DFSUtil#getNameServiceIdFromAddress(Configuration,InetSocketAddress,String)(Configuration)}
*/
/**
 * Two federated (non-HA) nameservices: the service RPC address map must
 * contain one entry per nameservice, each with a single address keyed by a
 * null namenode id, and address-to-nameservice lookup must round-trip.
 */
@Test public void testMultipleNamenodes() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  final String NN1_ADDRESS = "localhost:9000";
  final String NN2_ADDRESS = "localhost:9001";
  final String NN3_ADDRESS = "localhost:9002";
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
  // FIX: restore the stripped generics ("Map>" does not compile); outer key
  // is the nameservice id, inner key the (null, non-HA) namenode id.
  Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil.getNNServiceRpcAddresses(conf);
  assertEquals(2, nnMap.size());
  Map<String, InetSocketAddress> nn1Map = nnMap.get("nn1");
  assertEquals(1, nn1Map.size());
  InetSocketAddress addr = nn1Map.get(null);
  assertEquals("localhost", addr.getHostName());
  assertEquals(9000, addr.getPort());
  Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
  assertEquals(1, nn2Map.size());
  addr = nn2Map.get(null);
  assertEquals("localhost", addr.getHostName());
  assertEquals(9001, addr.getPort());
  // Known addresses resolve to their nameservice; unknown ones to null.
  checkNameServiceId(conf, NN1_ADDRESS, "nn1");
  checkNameServiceId(conf, NN2_ADDRESS, "nn2");
  checkNameServiceId(conf, NN3_ADDRESS, null);
  // Plain federation without HA: neither nameservice reports HA enabled.
  assertFalse(HAUtil.isHAEnabled(conf, "nn1"));
  assertFalse(HAUtil.isHAEnabled(conf, "nn2"));
}
BranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Regression test for HDFS-894 ensures that, when datanodes
* are restarted, the new IPC port is registered with the
* namenode.
*/
@Test public void testChangeIpcPort() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    InetSocketAddress nnAddr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(nnAddr, conf);
    // Restart the datanodes; they come back on fresh (possibly different)
    // IPC ports which the namenode must learn about (HDFS-894).
    cluster.restartDataNodes();
    DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
    long firstUpdateAfterRestart = report[0].getLastUpdate();
    // Poll with growing backoff until the restarted datanode heartbeats.
    boolean gotHeartbeat = false;
    for (int attempt = 0; attempt < 10 && !gotHeartbeat; attempt++) {
      try {
        Thread.sleep(attempt * 1000);
      } catch (InterruptedException ie) {
        // best-effort wait; keep polling
      }
      report = client.datanodeReport(DatanodeReportType.ALL);
      gotHeartbeat = report[0].getLastUpdate() > firstUpdateAfterRestart;
    }
    if (!gotHeartbeat) {
      fail("Never got a heartbeat from restarted datanode.");
    }
    // The namenode's report must reflect the datanode's new IPC port.
    int realIpcPort = cluster.getDataNodes().get(0).getIpcPort();
    assertEquals(realIpcPort, report[0].getIpcPort());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifierEqualityVerifier
// Re-registering a datanode under a new storage ID must replace, not
// duplicate, its entry in the namenode's datanode report.
@Test public void testChangeStorageID() throws Exception {
// Fixed fake endpoints for the hand-built DatanodeID below.
final String DN_IP_ADDR="127.0.0.1";
final String DN_HOSTNAME="localhost";
final int DN_XFER_PORT=12345;
final int DN_INFO_PORT=12346;
final int DN_INFO_SECURE_PORT=12347;
final int DN_IPC_PORT=12348;
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
// Start with zero real datanodes; registrations are faked over RPC below.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
NamenodeProtocols rpcServer=cluster.getNameNodeRpc();
DatanodeID dnId=new DatanodeID(DN_IP_ADDR,DN_HOSTNAME,"fake-datanode-id",DN_XFER_PORT,DN_INFO_PORT,DN_INFO_SECURE_PORT,DN_IPC_PORT);
long nnCTime=cluster.getNamesystem().getFSImage().getStorage().getCTime();
// Mocked storage info mirrors the NN's cTime and layout version so the
// registration is accepted.
StorageInfo mockStorageInfo=mock(StorageInfo.class);
doReturn(nnCTime).when(mockStorageInfo).getCTime();
doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo).getLayoutVersion();
DatanodeRegistration dnReg=new DatanodeRegistration(dnId,mockStorageInfo,null,VersionInfo.getVersion());
rpcServer.registerDatanode(dnReg);
DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL);
assertEquals("Expected a registered datanode",1,report.length);
// Register again with a different storage ID but identical network identity.
dnId=new DatanodeID(DN_IP_ADDR,DN_HOSTNAME,"changed-fake-datanode-id",DN_XFER_PORT,DN_INFO_PORT,DN_INFO_SECURE_PORT,DN_IPC_PORT);
dnReg=new DatanodeRegistration(dnId,mockStorageInfo,null,VersionInfo.getVersion());
rpcServer.registerDatanode(dnReg);
report=client.datanodeReport(DatanodeReportType.ALL);
// Still exactly one datanode: the changed ID replaced the old registration.
assertEquals("Datanode with changed storage ID not recognized",1,report.length);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
BooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
* Ensure the datanode manager does not do host lookup after registration,
* especially for node reports.
* @throws Exception
*/
@Test public void testDNSLookups() throws Exception {
// Security manager that counts host-resolution checks (see MonitorDNS).
MonitorDNS sm=new MonitorDNS();
System.setSecurityManager(sm);
MiniDFSCluster cluster=null;
try {
HdfsConfiguration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
cluster.waitActive();
// Startup/registration is allowed to perform lookups; record the baseline.
int initialLookups=sm.lookups;
assertTrue("dns security manager is active",initialLookups != 0);
DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
// None of the following operations may trigger additional DNS lookups:
// the lookup counter must stay at the post-registration baseline.
dm.refreshNodes(conf);
assertEquals(initialLookups,sm.lookups);
dm.refreshNodes(conf);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.ALL);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals(initialLookups,sm.lookups);
dm.getDatanodeListForReport(DatanodeReportType.DEAD);
assertEquals(initialLookups,sm.lookups);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
// Always restore the default security manager for subsequent tests.
System.setSecurityManager(null);
}
}
Class: org.apache.hadoop.hdfs.TestDecommission
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Tests decommission with replicas on the target datanode cannot be migrated
* to other datanodes and satisfy the replication factor. Make sure the
* datanode won't get stuck in decommissioning state.
*/
@Test(timeout=360000) public void testDecommission2() throws IOException {
LOG.info("Starting test testDecommission");
int numNamenodes=1;
int numDatanodes=4;
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
startCluster(numNamenodes,numDatanodes,conf);
ArrayList> namenodeDecomList=new ArrayList>(numNamenodes);
namenodeDecomList.add(0,new ArrayList(numDatanodes));
Path file1=new Path("testDecommission2.dat");
int replicas=4;
ArrayList decommissionedNodes=namenodeDecomList.get(0);
FileSystem fileSys=cluster.getFileSystem(0);
FSNamesystem ns=cluster.getNamesystem(0);
writeFile(fileSys,file1,replicas);
int deadDecomissioned=ns.getNumDecomDeadDataNodes();
int liveDecomissioned=ns.getNumDecomLiveDataNodes();
DatanodeInfo decomNode=decommissionNode(0,null,decommissionedNodes,AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
assertEquals(deadDecomissioned,ns.getNumDecomDeadDataNodes());
assertEquals(liveDecomissioned + 1,ns.getNumDecomLiveDataNodes());
DFSClient client=getDfsClient(cluster.getNameNode(0),conf);
assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
assertNull(checkFile(fileSys,file1,replicas,decomNode.getXferAddr(),numDatanodes));
cleanupFile(fileSys,file1);
cluster.shutdown();
startCluster(1,4,conf);
cluster.shutdown();
}
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test using a "registration name" in a host include file.
* Registration names are DataNode names specified in the configuration by
* dfs.datanode.hostname. The DataNode will send this name to the NameNode
* as part of its registration. Registration names are helpful when you
* want to override the normal first result of DNS resolution on the
* NameNode. For example, a given datanode IP may map to two hostnames,
* and you may want to choose which hostname is used internally in the
* cluster.
* It is not recommended to use a registration name which is not also a
* valid DNS hostname for the DataNode. See HDFS-5237 for background.
*/
@Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException {
  Configuration hdfsConf = new Configuration(conf);
  final String registrationName = "127.0.0.100";
  final String nonExistentDn = "127.0.0.10";
  hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
  cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).checkDataNodeHostConfig(true).setupHostsFile(true).build();
  cluster.waitActive();
  // FIX: parameterize the raw ArrayList declarations with <String>.
  // The include file initially lists only an address no datanode registers
  // with, so the real datanode is excluded and eventually reported dead.
  ArrayList<String> nodes = new ArrayList<String>();
  nodes.add(nonExistentDn);
  writeConfigFile(hostsFile, nodes);
  refreshNodes(cluster.getNamesystem(0), hdfsConf);
  DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
  while (true) {
    DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
    if (info.length == 1) {
      break;
    }
    LOG.info("Waiting for datanode to be marked dead");
    Thread.sleep(HEARTBEAT_INTERVAL * 1000);
  }
  // Now include the datanode by its registration name and restart it.
  int dnPort = cluster.getDataNodes().get(0).getXferPort();
  nodes = new ArrayList<String>();
  nodes.add(registrationName + ":" + dnPort);
  writeConfigFile(hostsFile, nodes);
  refreshNodes(cluster.getNamesystem(0), hdfsConf);
  cluster.restartDataNode(0);
  while (true) {
    DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
    if (info.length == 1) {
      Assert.assertFalse(info[0].isDecommissioned());
      Assert.assertFalse(info[0].isDecommissionInProgress());
      // The NN must report the configured registration name, not a DNS name.
      assertEquals(registrationName, info[0].getHostName());
      break;
    }
    LOG.info("Waiting for datanode to come back");
    Thread.sleep(HEARTBEAT_INTERVAL * 1000);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Tests restart of namenode while datanode hosts are added to exclude file
*/
@Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithNamenodeRestart");
  int numNamenodes = 1;
  int numDatanodes = 1;
  int replicas = 1;
  startCluster(numNamenodes, numDatanodes, conf);
  Path file1 = new Path("testDecommission.dat");
  FileSystem fileSys = cluster.getFileSystem();
  writeFile(fileSys, file1, replicas);
  DFSClient client = getDfsClient(cluster.getNameNode(), conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  DatanodeID excludedDatanodeID = info[0];
  String excludedDatanodeName = info[0].getXferAddr();
  // FIX: parameterize the raw ArrayList with <String>.
  writeConfigFile(excludeFile, new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
  // Add a second datanode so the excluded node's replica can migrate.
  cluster.startDataNodes(conf, 1, true, null, null, null, null);
  numDatanodes += 1;
  assertEquals("Number of datanodes should be 2 ", 2, cluster.getDataNodes().size());
  // Restart the NN while the exclude file lists the first datanode; on
  // startup the NN must pick up the exclusion and decommission the node.
  cluster.restartNameNode();
  DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(cluster.getNamesystem(), excludedDatanodeID);
  waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
  assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length);
  // Poll up to ~20s for the block to be re-replicated off the
  // decommissioned node.
  int tries = 0;
  while (tries++ < 20) {
    try {
      Thread.sleep(1000);
      if (checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(), numDatanodes) == null) {
        break;
      }
    } catch (InterruptedException ie) {
      // keep polling
    }
  }
  assertTrue("Checked if block was replicated after decommission, tried " + tries + " times.", tries < 20);
  cleanupFile(fileSys, file1);
  cluster.shutdown();
  // Restart a fresh cluster to verify clean shutdown/startup after the test.
  startCluster(numNamenodes, numDatanodes, conf);
  cluster.shutdown();
}
InternalCallVerifierEqualityVerifier
/**
* Test that the socket cache can be disabled by setting the capacity to
* 0. Regression test for HDFS-3365.
* @throws Exception
*/
@Test public void testDisableCache() throws Exception {
  // Setting the cache capacity to zero must disable the socket/peer cache
  // entirely (regression test for HDFS-3365).
  HdfsConfiguration cacheDisabledConf = new HdfsConfiguration();
  cacheDisabledConf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
  BlockReaderTestUtil testUtil = new BlockReaderTestUtil(1, cacheDisabledConf);
  final Path testFile = new Path("/testConnCache.dat");
  testUtil.writeFile(testFile, FILE_SIZE / 1024);
  FileSystem uncachedFs = FileSystem.newInstance(testUtil.getConf());
  try {
    DFSTestUtil.readFile(uncachedFs, testFile);
    // After a full read, nothing may have been retained in the peer cache.
    assertEquals(0, ((DistributedFileSystem) uncachedFs).dfs.getClientContext().getPeerCache().size());
  } finally {
    uncachedFs.close();
    testUtil.shutdown();
  }
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Tests error paths for{@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
*/
@Test(timeout=60000) public void testGetFileBlockStorageLocationsError() throws Exception {
final Configuration conf=getTestConfiguration();
// Enable the block-metadata RPCs and use a short timeout with no IPC
// connect retries so the injected slowness below reliably times out.
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
conf.setInt(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,1500);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.getDataNodes();
final DistributedFileSystem fs=cluster.getFileSystem();
// Two single-block files, each replicated to both datanodes.
final Path tmpFile1=new Path("/errorfile1.dat");
final Path tmpFile2=new Path("/errorfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
// Wait until all 4 replicas (2 files x 2 replicas) are reported.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
// ignored: retry until the waitFor timeout expires
}
return false;
}
}
,500,30000);
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
List allLocs=Lists.newArrayList();
allLocs.addAll(Arrays.asList(blockLocs1));
allLocs.addAll(Arrays.asList(blockLocs2));
// Inject a 3s delay into the datanode metadata RPC -- longer than the
// 1500ms client timeout -- so every per-datanode RPC times out.
DataNodeFaultInjector injector=Mockito.mock(DataNodeFaultInjector.class);
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(3000);
return null;
}
}
).when(injector).getHdfsBlocksMetadata();
DataNodeFaultInjector.instance=injector;
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(allLocs);
// With all RPCs timed out, no cached host info should be attached.
for ( BlockStorageLocation loc : locs) {
assertEquals("Found more than 0 cached hosts although RPCs supposedly timed out",0,loc.getCachedHosts().length);
}
// Restore normal behavior, then stop one datanode: each block should get
// exactly one valid and one invalid (null) volume id.
DataNodeFaultInjector.instance=new DataNodeFaultInjector();
DataNodeProperties stoppedNode=cluster.stopDataNode(0);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocation for two 1-block files",2,locs.length);
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getHosts().length);
assertEquals("Expected two VolumeIDs for each block",2,l.getVolumeIds().length);
assertTrue("Expected one valid and one invalid volume",(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null));
}
// Bring the datanode back, delete the second file, and wait for the
// deletions to actually reach the datanodes.
cluster.restartDataNode(stoppedNode,true);
cluster.waitActive();
fs.delete(tmpFile2,true);
HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
// The surviving file keeps valid volume ids; the deleted file's are null.
assertNotNull(locs[0].getVolumeIds()[0]);
assertNotNull(locs[0].getVolumeIds()[1]);
assertNull(locs[1].getVolumeIds()[0]);
assertNull(locs[1].getVolumeIds()[1]);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Exercises DFSClient lease-renewer lifecycle: the renewer thread must run
// exactly while write streams are open (plus a grace period) and must never
// start for read-only access.
@Test public void testDFSClient() throws Exception {
Configuration conf=getTestConfiguration();
// Grace period after which an idle LeaseRenewer thread terminates.
final long grace=1000L;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final String filepathstring="/test/LeaseChecker/foo";
final Path[] filepaths=new Path[4];
for (int i=0; i < filepaths.length; i++) {
filepaths[i]=new Path(filepathstring + i);
}
final long millis=Time.now();
{
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
{
// Case 1: renewer runs while one file is open for write and stops
// shortly after the stream closes.
final FSDataOutputStream out=dfs.create(filepaths[0]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out.close();
Thread.sleep(grace / 4 * 3);
// Still within the grace period: the renewer has not shut down yet.
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
for (int i=0; i < 3; i++) {
if (dfs.dfs.getLeaseRenewer().isRunning()) {
Thread.sleep(grace / 2);
}
}
// Past the grace period: the renewer must have stopped.
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
}
{
// Case 2: renewer keeps running as long as any stream remains open.
final FSDataOutputStream out1=dfs.create(filepaths[1]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
final FSDataOutputStream out2=dfs.create(filepaths[2]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out1.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out1.close();
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out2.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out2.close();
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
}
{
// Case 3: a stream opened inside the grace period keeps the renewer
// alive until that stream closes and its own grace period expires.
final FSDataOutputStream out3=dfs.create(filepaths[3]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out3.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out3.close();
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
for (int i=0; i < 3; i++) {
if (dfs.dfs.getLeaseRenewer().isRunning()) {
Thread.sleep(grace / 2);
}
}
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
}
dfs.close();
}
{
// Opening a non-existent file must raise FileNotFoundException.
FileSystem fs=cluster.getFileSystem();
Path dir=new Path("/wrwelkj");
assertFalse("File should not exist for test.",fs.exists(dir));
try {
FSDataInputStream in=fs.open(dir);
try {
in.close();
fs.close();
}
finally {
assertTrue("Did not get a FileNotFoundException for non-existing" + " file.",false);
}
}
catch ( FileNotFoundException fnf) {
// expected
}
}
{
// Reads do not involve leases, so they must never start the renewer.
final DistributedFileSystem dfs=cluster.getFileSystem();
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
FSDataInputStream in=dfs.open(filepaths[0]);
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
assertEquals(millis,in.readLong());
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
in.close();
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
dfs.close();
}
{
// Round-trip through a filesystem obtained from an explicit hdfs:// URI
// addressed by IP.
String uri="hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file";
Path path=new Path(uri);
FileSystem fs=FileSystem.get(path.toUri(),conf);
FSDataOutputStream out=fs.create(path);
byte[] buf=new byte[1024];
out.write(buf);
out.close();
FSDataInputStream in=fs.open(path);
in.readFully(buf);
in.close();
fs.close();
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Tests the normal path of batching up BlockLocation[]s to be passed to a
* single{@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}call
*/
@Test(timeout=60000) public void testGetFileBlockStorageLocationsBatching() throws Exception {
final Configuration conf=getTestConfiguration();
// Verbose RPC/client logging to aid debugging of the batched RPC path.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.TRACE);
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final DistributedFileSystem fs=cluster.getFileSystem();
// Two single-block files, each replicated to both datanodes.
final Path tmpFile1=new Path("/tmpfile1.dat");
final Path tmpFile2=new Path("/tmpfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
// Wait until all 4 replicas (2 files x 2 replicas) are reported.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
// ignored: retry until the waitFor timeout expires
}
return false;
}
}
,500,30000);
// Batch both files' locations into one getFileBlockStorageLocations call.
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
BlockLocation[] blockLocs=(BlockLocation[])ArrayUtils.addAll(blockLocs1,blockLocs2);
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
// Diagnostic dump of each replica's volume id.
int counter=0;
for ( BlockStorageLocation l : locs) {
for (int i=0; i < l.getVolumeIds().length; i++) {
VolumeId id=l.getVolumeIds()[i];
String name=l.getNames()[i];
if (id != null) {
System.out.println("Datanode " + name + " has block "+ counter+ " on volume id "+ id.toString());
}
}
counter++;
}
// Every replica of both blocks must come back with a non-null volume id.
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getVolumeIds().length);
for (int i=0; i < l.getVolumeIds().length; i++) {
VolumeId id=l.getVolumeIds()[i];
String name=l.getNames()[i];
assertTrue("Expected block to be valid on datanode " + name,id != null);
}
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test getEncryptionZoneForPath as a non super user.
*/
@Test(timeout=60000) public void testGetEZAsNonSuperUser() throws Exception {
// Non-superuser the checks below run as.
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
final Path testRoot=new Path(fsHelper.getTestRootDir());
// superPath: EZ readable only by the superuser (0700); allPath:
// world-accessible EZ; nonEZDir: directory outside any encryption zone.
final Path superPath=new Path(testRoot,"superuseronly");
final Path superPathFile=new Path(superPath,"file1");
final Path allPath=new Path(testRoot,"accessall");
final Path allPathFile=new Path(allPath,"file1");
final Path nonEZDir=new Path(testRoot,"nonEZDir");
final Path nonEZFile=new Path(nonEZDir,"file1");
final int len=8192;
fsWrapper.mkdir(testRoot,new FsPermission((short)0777),true);
fsWrapper.mkdir(superPath,new FsPermission((short)0700),false);
fsWrapper.mkdir(allPath,new FsPermission((short)0777),false);
fsWrapper.mkdir(nonEZDir,new FsPermission((short)0777),false);
dfsAdmin.createEncryptionZone(superPath,TEST_KEY);
dfsAdmin.createEncryptionZone(allPath,TEST_KEY);
// Snapshot the root so EZ paths remain resolvable after the deletes below.
dfsAdmin.allowSnapshot(new Path("/"));
final Path newSnap=fs.createSnapshot(new Path("/"));
DFSTestUtil.createFile(fs,superPathFile,len,(short)1,0xFEED);
DFSTestUtil.createFile(fs,allPathFile,len,(short)1,0xFEED);
DFSTestUtil.createFile(fs,nonEZFile,len,(short)1,0xFEED);
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final HdfsAdmin userAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
// A null path argument is rejected with an NPE.
try {
userAdmin.getEncryptionZoneForPath(null);
fail("should have thrown NPE");
}
catch ( NullPointerException e) {
// expected
}
// Accessible EZ: both the zone root and a file inside resolve to the zone.
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPath).getPath().toString());
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString());
// Superuser-only EZ: access is denied for this user.
try {
userAdmin.getEncryptionZoneForPath(superPathFile);
fail("expected AccessControlException");
}
catch ( AccessControlException e) {
assertExceptionContains("Permission denied:",e);
}
// Paths outside any zone return null.
assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZDir));
assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZFile));
// The snapshotted copy of the zone keeps resolving even as the live file
// and then the live zone directory are deleted.
String snapshottedAllPath=newSnap.toString() + allPath.toString();
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
fs.delete(allPathFile,false);
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
fs.delete(allPath,true);
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
// Deleted live paths no longer resolve to a zone.
assertNull("expected null for deleted file path",userAdmin.getEncryptionZoneForPath(allPathFile));
assertNull("expected null for deleted directory path",userAdmin.getEncryptionZoneForPath(allPath));
return null;
}
}
);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=120000) public void testReadWrite() throws Exception {
  final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final int len = 8192;
  // Unencrypted reference file to compare encryption-zone reads against.
  final Path baseFile = new Path("/base");
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
  // Create an encryption zone and a file inside it.
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  // Transparent decryption: contents must match the unencrypted baseline.
  verifyFilesEqual(fs, baseFile, encFile1, len);
  assertNumZones(1);
  // Roll the zone key; the existing file must remain readable.
  String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  verifyFilesEqual(fs, baseFile, encFile1, len);
  // A file written after the roll gets a fresh EDEK and the new key version.
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different", Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(), feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different", feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  // Both encrypted files still decrypt to identical plaintext.
  verifyFilesEqual(fs, encFile1, encFile2, len);
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verifies cipher suite negotiation when creating files in an encryption
 * zone: creation fails when the client advertises no usable suite and
 * succeeds as long as AES/CTR/NoPadding appears anywhere in the list.
 * Also checks that the NameNode created exactly one key (with one version)
 * for the single zone and that the negotiated suite is recorded in each
 * file's FileEncryptionInfo.
 */
@Test(timeout=60000) public void testCipherSuiteNegotiation() throws Exception {
final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
final Path zone=new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone,TEST_KEY);
// Default client configuration: create succeeds.
DFSTestUtil.createFile(fs,new Path(zone,"success1"),0,(short)1,0xFEED);
// Empty suite list: create must be rejected.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(0);
try {
DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
fail("Created a file without specifying a CipherSuite!");
}
catch ( UnknownCipherSuiteException e) {
assertExceptionContains("No cipher suites",e);
}
// Only unknown suites: still rejected.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
try {
DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
fail("Created a file without specifying a CipherSuite!");
}
catch ( UnknownCipherSuiteException e) {
assertExceptionContains("No cipher suites",e);
}
// A known suite first in the list: accepted.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
DFSTestUtil.createFile(fs,new Path(zone,"success2"),0,(short)1,0xFEED);
// A known suite last in the list: also accepted.
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(3);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
DFSTestUtil.createFile(fs,new Path(zone,"success3"),4096,(short)1,0xFEED);
// Exactly one key with one version should exist for the single zone.
cluster.getNamesystem().getProvider().flush();
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
List<String> keys=provider.getKeys();
assertEquals("Expected NN to have created one key per zone",1,keys.size());
List<KeyProvider.KeyVersion> allVersions=Lists.newArrayList();
for ( String key : keys) {
List<KeyProvider.KeyVersion> versions=provider.getKeyVersions(key);
assertEquals("Should only have one key version per key",1,versions.size());
allVersions.addAll(versions);
}
// Files created with an explicit suite list must record AES/CTR/NoPadding.
for (int i=2; i <= 3; i++) {
FileEncryptionInfo feInfo=getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
assertEquals(feInfo.getCipherSuite(),CipherSuite.AES_CTR_NOPADDING);
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Tests the retry logic in startFile. We release the lock while generating
 * an EDEK, so tricky things can happen in the intervening time: the zone
 * can be created, deleted, or swapped for one that uses a different key.
 * A bounded number of retries is expected; unbounded churn must make the
 * create fail with "Too many retries".
 */
@Test(timeout=120000) public void testStartFileRetry() throws Exception {
final Path zone1=new Path("/zone1");
final Path file=new Path(zone1,"file1");
fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
ExecutorService executor=Executors.newSingleThreadExecutor();
// Create the zone while the createFile is in-flight: one retry expected.
executor.submit(new InjectFaultTask(){
@Override public void doFault() throws Exception {
dfsAdmin.createEncryptionZone(zone1,TEST_KEY);
}
@Override public void doCleanup() throws Exception {
assertEquals("Expected a startFile retry",2,injector.generateCount);
fsWrapper.delete(file,false);
}
}
).get();
// Delete the zone while createFile is in-flight: no retry needed.
executor.submit(new InjectFaultTask(){
@Override public void doFault() throws Exception {
fsWrapper.delete(zone1,true);
}
@Override public void doCleanup() throws Exception {
assertEquals("Expected no startFile retries",1,injector.generateCount);
fsWrapper.delete(file,false);
}
}
).get();
fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
final String otherKey="otherKey";
DFSTestUtil.createKey(otherKey,cluster,conf);
dfsAdmin.createEncryptionZone(zone1,TEST_KEY);
// Recreate the zone with a different key mid-create: one retry expected.
executor.submit(new InjectFaultTask(){
@Override public void doFault() throws Exception {
fsWrapper.delete(zone1,true);
fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
dfsAdmin.createEncryptionZone(zone1,otherKey);
}
@Override public void doCleanup() throws Exception {
assertEquals("Expected a startFile retry",2,injector.generateCount);
fsWrapper.delete(zone1,true);
}
}
).get();
fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
final String anotherKey="anotherKey";
DFSTestUtil.createKey(anotherKey,cluster,conf);
dfsAdmin.createEncryptionZone(zone1,anotherKey);
String keyToUse=otherKey;
MyInjector injector=new MyInjector();
EncryptionFaultInjector.instance=injector;
// Keep swapping the zone's key while the create retries; after enough
// flips the client must give up.
Future<?> future=executor.submit(new CreateFileTask(fsWrapper,file));
for (int i=0; i < 10; i++) {
injector.ready.await();
fsWrapper.delete(zone1,true);
fsWrapper.mkdir(zone1,FsPermission.getDirDefault(),true);
dfsAdmin.createEncryptionZone(zone1,keyToUse);
// Alternate between the two keys; compare by value, not reference.
if (keyToUse.equals(otherKey)) {
keyToUse=anotherKey;
}
else {
keyToUse=otherKey;
}
injector.wait.countDown();
injector=new MyInjector();
EncryptionFaultInjector.instance=injector;
}
try {
future.get();
fail("Expected exception from too many retries");
}
catch ( ExecutionException e) {
assertExceptionContains("Too many retries because of encryption zone operations",e.getCause());
}
}
Class: org.apache.hadoop.hdfs.TestFileAppend
InternalCallVerifierEqualityVerifier
/**
 * Tests appending after the soft limit expires: a second client may
 * reclaim the lease once the soft limit has passed and append successfully.
 */
@Test public void testAppendAfterSoftLimit() throws IOException, InterruptedException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);
// Tiny soft limit so the lease becomes reclaimable almost immediately;
// the hard limit stays effectively infinite.
final long soft=1L;
final long hard=9999999L;
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.setLeasePeriod(soft,hard);
cluster.waitActive();
FileSystem firstClient=cluster.getFileSystem();
FileSystem secondClient=new DistributedFileSystem();
secondClient.initialize(firstClient.getUri(),conf);
final Path testPath=new Path("/testAppendAfterSoftLimit");
final byte[] payload=AppendTestUtil.initBuffer(32);
// Write via the first client but do NOT close, so it still holds the lease.
FSDataOutputStream out=firstClient.create(testPath);
out.write(payload);
// Give the soft limit a chance to expire.
Thread.sleep(250);
try {
// The second client recovers the lease and appends.
FSDataOutputStream appendStream=secondClient.append(testPath);
appendStream.write(payload);
appendStream.close();
assertEquals(payload.length,firstClient.getFileStatus(testPath).getLen());
}
finally {
firstClient.close();
secondClient.close();
cluster.shutdown();
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Two consecutive appends on a file with a full block: the second append,
 * issued by a different client while the first append holds the lease,
 * must fail with AlreadyBeingCreatedException.
 */
@Test public void testAppendTwice() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
final FileSystem fs1=cluster.getFileSystem();
final FileSystem fs2=AppendTestUtil.createHdfsWithDifferentUsername(conf);
try {
final Path target=new Path("/testAppendTwice/foo");
final int blockLen=1 << 16;
final byte[] data=AppendTestUtil.initBuffer(blockLen);
{
// Write exactly one full block through the second user's client.
FSDataOutputStream writer=fs2.create(target,true,4096,(short)1,blockLen);
writer.write(data,0,blockLen);
writer.close();
}
// fs2 takes the lease; fs1's append must then be rejected.
fs2.append(target);
fs1.append(target);
Assert.fail();
}
catch ( RemoteException remoteEx) {
AppendTestUtil.LOG.info("Got an exception:",remoteEx);
Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),remoteEx.getClassName());
}
finally {
fs2.close();
fs1.close();
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestFileAppend3
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * TC11: Racing rename.
 * Renames a file while an appending stream to it is still open and only
 * hflushed, closes the stream afterwards, and verifies that every block of
 * the renamed file reports a consistent size on the NameNode and on each
 * DataNode replica.
 * @throws IOException an exception might be thrown
 */
@Test public void testTC11() throws Exception {
final Path p=new Path("/TC11/foo");
System.out.println("p=" + p);
// Fill exactly one full block, then close.
final int len1=(int)BLOCK_SIZE;
{
FSDataOutputStream out=fs.create(p,false,buffersize,REPLICATION,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
// Reopen for append and hflush half a block without closing yet.
FSDataOutputStream out=fs.append(p);
final int len2=(int)BLOCK_SIZE / 2;
AppendTestUtil.write(out,len1,len2);
out.hflush();
// Rename the file while the append stream is still open (the "race"),
// then close the stream.
final Path pnew=new Path(p + ".new");
assertTrue(fs.rename(p,pnew));
out.close();
// Every block except possibly the last must be a full block, and each
// DataNode's stored replica must agree with the NameNode's block size.
final long len=fs.getFileStatus(pnew).getLen();
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(pnew.toString(),0L,len);
final int numblock=locatedblocks.locatedBlockCount();
for (int i=0; i < numblock; i++) {
final LocatedBlock lb=locatedblocks.get(i);
final ExtendedBlock blk=lb.getBlock();
final long size=lb.getBlockSize();
if (i < numblock - 1) {
assertEquals(BLOCK_SIZE,size);
}
for ( DatanodeInfo datanodeinfo : lb.getLocations()) {
final DataNode dn=cluster.getDataNode(datanodeinfo.getIpcPort());
final Block metainfo=DataNodeTestUtils.getFSDataset(dn).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
assertEquals(size,metainfo.getNumBytes());
}
}
}
InternalCallVerifierEqualityVerifier
/**
 * Append to a partial CRC chunk where the first appended write does not
 * fill up the partial CRC chunk, then verify the whole file's contents.
 * @throws IOException
 */
@Test public void testAppendToPartialChunk() throws IOException {
final Path path=new Path("/partialChunk/foo");
final int fileLen=513;
System.out.println("p=" + path);
byte[] contents=AppendTestUtil.initBuffer(fileLen);
// Create the file with a single byte so the last CRC chunk is partial.
FSDataOutputStream stream=AppendTestUtil.createFile(fs,path,1);
stream.write(contents,0,1);
stream.close();
System.out.println("Wrote 1 byte and closed the file " + path);
// First append session: one more byte, still a partial chunk.
stream=fs.append(path);
stream.write(contents,1,1);
stream.hflush();
stream.close();
System.out.println("Append 1 byte and closed the file " + path);
// Second append session: flush in small pieces across the chunk boundary.
stream=fs.append(path);
assertEquals(2,stream.getPos());
stream.write(contents,2,1);
stream.hflush();
System.out.println("Append and flush 1 byte");
stream.write(contents,3,2);
stream.hflush();
System.out.println("Append and flush 2 byte");
// Write the remaining 508 bytes and close.
stream.write(contents,5,fileLen - 5);
stream.close();
System.out.println("Flush 508 byte and closed the file " + path);
AppendTestUtil.checkFullFile(fs,path,fileLen,contents,"Failed to append to a partial chunk");
}
APIUtilityVerifierInternalCallVerifierEqualityVerifierPublicFieldVerifier
/**
 * TC7: Corrupted replicas are present.
 * Truncates one on-disk replica of a half-block file to zero length, then
 * verifies that a subsequent append still succeeds and the full file can
 * be read back.
 * @throws IOException an exception might be thrown
 */
@Test public void testTC7() throws Exception {
final short repl=2;
final Path p=new Path("/TC7/foo");
System.out.println("p=" + p);
// Write half a block and wait until both replicas exist.
final int len1=(int)(BLOCK_SIZE / 2);
{
FSDataOutputStream out=fs.create(p,false,buffersize,repl,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
DFSTestUtil.waitReplication(fs,p,repl);
// The file should consist of exactly one block of len1 bytes.
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(p.toString(),0L,len1);
assertEquals(1,locatedblocks.locatedBlockCount());
final LocatedBlock lb=locatedblocks.get(0);
final ExtendedBlock blk=lb.getBlock();
assertEquals(len1,lb.getBlockSize());
DatanodeInfo[] datanodeinfos=lb.getLocations();
assertEquals(repl,datanodeinfos.length);
// Corrupt the first replica by truncating its block file to 0 bytes
// directly on disk.
final DataNode dn=cluster.getDataNode(datanodeinfos[0].getIpcPort());
final File f=DataNodeTestUtils.getBlockFile(dn,blk.getBlockPoolId(),blk.getLocalBlock());
final RandomAccessFile raf=new RandomAccessFile(f,"rw");
AppendTestUtil.LOG.info("dn=" + dn + ", blk="+ blk+ " (length="+ blk.getNumBytes()+ ")");
assertEquals(len1,raf.length());
raf.setLength(0);
raf.close();
// Append a full block; the write path must cope with the corrupt replica.
final int len2=(int)BLOCK_SIZE;
{
FSDataOutputStream out=fs.append(p);
AppendTestUtil.write(out,len1,len2);
out.close();
}
// Verify the complete file contents.
AppendTestUtil.check(fs,p,len1 + len2);
}
InternalCallVerifierEqualityVerifier
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 exhibiting the issue.
 */
@Test public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
final Configuration conf=new HdfsConfiguration();
// Unpack the pre-built 0.23.1 image containing the buggy append edits.
String tarFile=System.getProperty("test.cache.data","build/test/cache") + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
String testDir=PathUtils.getTestDirName(getClass());
File dfsDir=new File(testDir,"image-with-buggy-append");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
FileUtil.unTar(new File(tarFile),new File(testDir));
File nameDir=new File(dfsDir,"name");
GenericTestUtils.assertExists(nameDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameDir.getAbsolutePath());
// Start a NameNode-only cluster on the unpacked image in UPGRADE mode.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).waitSafeMode(false).startupOption(StartupOption.UPGRADE).build();
try {
// If the edits replayed correctly, the appended file has its full length.
FileSystem fs=cluster.getFileSystem();
Path testPath=new Path("/tmp/io_data/test_io_0");
assertEquals(2 * 1024 * 1024,fs.getFileStatus(testPath).getLen());
}
finally {
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestFileCorruption
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
 * See Hadoop-4351.
 */
@Test public void testArrayOutOfBoundsException() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
final Path FILE_PATH=new Path("/tmp.txt");
final long FILE_LEN=1L;
DFSTestUtil.createFile(fs,FILE_PATH,FILE_LEN,(short)2,1L);
// Locate the block file on the first DataNode; it may live in either of
// the node's two storage directories.
final String bpid=cluster.getNamesystem().getBlockPoolId();
File storageDir=cluster.getInstanceStorageDir(0,0);
File dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
assertTrue("Data directory does not exist",dataDir.exists());
ExtendedBlock blk=getBlock(bpid,dataDir);
if (blk == null) {
storageDir=cluster.getInstanceStorageDir(0,1);
dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
blk=getBlock(bpid,dataDir);
}
assertFalse("Data directory does not contain any blocks or there was an " + "IO error",blk == null);
// Start a third DataNode that holds no replica of the block.
cluster.startDataNodes(conf,1,true,null,null);
ArrayList<DataNode> datanodes=cluster.getDataNodes();
assertEquals(3,datanodes.size());
DataNode dataNode=datanodes.get(2);
// Report the block as corrupt from the replica-less node; this must not
// trigger an ArrayIndexOutOfBoundsException in the BlockManager.
DatanodeRegistration dnR=DataNodeTestUtils.getDNRegistrationForBP(dataNode,blk.getBlockPoolId());
FSNamesystem ns=cluster.getNamesystem();
ns.writeLock();
try {
cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,new DatanodeInfo(dnR),"TEST","STORAGE_ID");
}
finally {
ns.writeUnlock();
}
// The file must still be readable and deletable afterwards.
fs.open(FILE_PATH);
fs.delete(FILE_PATH,false);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Class: org.apache.hadoop.hdfs.TestFileCreation
IterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Test creating two files at the same time: bytes written to two
 * concurrently-open output streams must read back intact.
 */
@Test public void testConcurrentFileCreation() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs=cluster.getFileSystem();
Path fooPath=new Path("/foo");
Path barPath=new Path("/bar");
FSDataOutputStream fooOut=fs.create(fooPath);
FSDataOutputStream barOut=fs.create(barPath);
// Interleave the first 100 bytes between both open files.
for (int b=0; b < 100; b++) {
fooOut.write(b);
barOut.write(b);
}
fooOut.close();
// Keep writing to the second file after the first is closed.
for (int b=100; b < 200; b++) {
barOut.write(b);
}
barOut.close();
// Read both files back and verify every byte.
FSDataInputStream fooIn=fs.open(fooPath);
FSDataInputStream barIn=fs.open(barPath);
for (int b=0; b < 100; b++) {
assertEquals(b,fooIn.read());
}
for (int b=0; b < 200; b++) {
assertEquals(b,barIn.read());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Create a file, write something, hflush but not close.
 * Then change lease period and wait for lease recovery.
 * Finally, read the block directly from each Datanode and verify the content.
 */
@Test public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
final long leasePeriod=1000;
final int DATANODE_NUM=3;
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs=null;
try {
cluster.waitActive();
dfs=cluster.getFileSystem();
final String f=DIR + "foo";
final Path fpath=new Path(f);
// Write and hflush without closing so this client keeps the lease.
HdfsDataOutputStream out=create(dfs,fpath,DATANODE_NUM);
out.write("something".getBytes());
out.hflush();
int actualRepl=out.getCurrentBlockReplication();
assertEquals(f + " should be replicated to " + DATANODE_NUM+ " datanodes.",DATANODE_NUM,actualRepl);
// Shrink both lease limits and wait long enough for hard-limit recovery.
cluster.setLeasePeriod(leasePeriod,leasePeriod);
try {
Thread.sleep(5 * leasePeriod);
}
catch ( InterruptedException e) {
// Best-effort wait; lease recovery usually completes well within this.
}
// After recovery the file is finalized; read each replica's block file
// directly on disk and confirm at least one holds the data.
LocatedBlocks locations=dfs.dfs.getNamenode().getBlockLocations(f,0,Long.MAX_VALUE);
assertEquals(1,locations.locatedBlockCount());
LocatedBlock locatedblock=locations.getLocatedBlocks().get(0);
int successcount=0;
for ( DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
DataNode datanode=cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk=locatedblock.getBlock();
Block b=DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
final File blockfile=DataNodeTestUtils.getFile(datanode,blk.getBlockPoolId(),b.getBlockId());
System.out.println("blockfile=" + blockfile);
if (blockfile != null) {
BufferedReader in=new BufferedReader(new FileReader(blockfile));
assertEquals("something",in.readLine());
in.close();
successcount++;
}
}
System.out.println("successcount=" + successcount);
assertTrue(successcount > 0);
}
finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
System.out.println("testLeaseExpireHardLimit successful");
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test that file leases are persisted across namenode restarts: streams
 * opened (and files renamed) before the restarts must still be writable
 * and closeable afterwards, and the NameNode must clean up their blocks.
 */
@Test public void testFileCreationNamenodeRestart() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs=null;
try {
cluster.waitActive();
fs=cluster.getFileSystem();
final int nnport=cluster.getNameNodePort();
// Open several files and leave their streams open across the restarts.
Path file1=new Path("/filestatus.dat");
HdfsDataOutputStream stm=create(fs,file1,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
assertEquals(file1 + " should be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
writeFile(stm,numBlocks * blockSize);
stm.hflush();
assertEquals(file1 + " should still be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
// Rename the first file while its stream is still open.
Path fileRenamed=new Path("/filestatusRenamed.dat");
fs.rename(file1,fileRenamed);
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to "+ fileRenamed);
file1=fileRenamed;
Path file2=new Path("/filestatus2.dat");
FSDataOutputStream stm2=createFile(fs,file2,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
Path file3=new Path("/user/home/fullpath.dat");
FSDataOutputStream stm3=createFile(fs,file3,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
Path file4=new Path("/user/home/fullpath4.dat");
FSDataOutputStream stm4=createFile(fs,file4,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
// Move the parent directory so file3/file4 change paths under open leases.
fs.mkdirs(new Path("/bin"));
fs.rename(new Path("/user/home"),new Path("/bin"));
Path file3new=new Path("/bin/home/fullpath.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to "+ file3new);
Path file4new=new Path("/bin/home/fullpath4.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to "+ file4new);
// Restart the NameNode twice (without reformatting) so leases must be
// recovered from the persisted image/edits.
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
// Ignore; the sleep only lets idle IPC connections time out.
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
cluster.shutdown();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
// Ignore; best-effort pause between restarts.
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Point the still-open streams at the files' post-rename paths so they
// can resume against the restarted NameNode.
DFSOutputStream dfstream=(DFSOutputStream)(stm.getWrappedStream());
dfstream.setTestFilename(file1.toString());
dfstream=(DFSOutputStream)(stm3.getWrappedStream());
dfstream.setTestFilename(file3new.toString());
dfstream=(DFSOutputStream)(stm4.getWrappedStream());
dfstream.setTestFilename(file4new.toString());
// The old streams must still accept writes and close cleanly.
byte[] buffer=AppendTestUtil.randomBytes(seed,1);
stm.write(buffer);
stm.close();
stm2.write(buffer);
stm2.close();
stm3.close();
stm4.close();
// Verify block counts after recovery.
DFSClient client=fs.dfs;
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertEquals("Error blocks were not cleaned up for file " + file1,3,locations.locatedBlockCount());
locations=client.getNamenode().getBlockLocations(file2.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertEquals("Error blocks were not cleaned up for file " + file2,1,locations.locatedBlockCount());
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
InternalCallVerifierEqualityVerifierPublicFieldVerifier
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires: a block allocated via addBlock but never written must be
 * cleaned up once lease recovery runs.
 */
@Test public void testFileCreationError2() throws IOException {
long leasePeriod=1000;
System.out.println("testFileCreationError2 start");
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem dfs=null;
try {
cluster.waitActive();
dfs=cluster.getFileSystem();
DFSClient client=dfs.dfs;
Path file1=new Path("/filestatus.dat");
createFile(dfs,file1,1);
System.out.println("testFileCreationError2: " + "Created file filestatus.dat with one replicas.");
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("testFileCreationError2: " + "The file has " + locations.locatedBlockCount() + " blocks.");
// Allocate an extra block directly through the NameNode without ever
// writing any data to it.
LocatedBlock location=client.getNamenode().addBlock(file1.toString(),client.clientName,null,null,INodeId.GRANDFATHER_INODE_ID,null);
System.out.println("testFileCreationError2: " + "Added block " + location.getBlock());
locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
int count=locations.locatedBlockCount();
System.out.println("testFileCreationError2: " + "The file now has " + count + " blocks.");
// Shrink both lease limits so recovery kicks in quickly, then wait for it.
cluster.setLeasePeriod(leasePeriod,leasePeriod);
try {
Thread.sleep(5 * leasePeriod);
}
catch ( InterruptedException e) {
// Best-effort wait; proceeding early only risks a flaky assertion.
}
// After lease recovery the never-written last block must have been removed.
locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("testFileCreationError2: " + "locations = " + locations.locatedBlockCount());
assertEquals(0,locations.locatedBlockCount());
System.out.println("testFileCreationError2 successful");
}
finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
}
UtilityVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Same test but the client should bind to a local interface: binding to
 * "lo" must work on Linux, while a nonexistent interface must be rejected
 * with UnknownHostException.
 */
@Test public void testFileCreationSetLocalInterface() throws IOException {
// Loopback interface naming ("lo") is Linux-specific.
final String osName=System.getProperty("os.name");
assumeTrue(osName.startsWith("Linux"));
// Binding to the loopback interface should succeed.
checkFileCreation("lo",false);
// A bogus interface name must be rejected with a descriptive message.
try {
checkFileCreation("bogus-interface",false);
fail("Able to specify a bogus interface");
}
catch ( UnknownHostException e) {
assertEquals("No such interface bogus-interface",e.getMessage());
}
}
IterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Test creating a file whose data gets synced to disk when closed
 * (dfs.datanode.synconclose enabled): concurrent writes to two files must
 * still read back intact.
 */
@Test public void testFileCreationSyncOnClose() throws IOException {
Configuration conf=new HdfsConfiguration();
conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs=cluster.getFileSystem();
Path pathA=new Path("/foo");
Path pathB=new Path("/bar");
FSDataOutputStream outA=fs.create(pathA);
FSDataOutputStream outB=fs.create(pathB);
// The first 100 bytes go to both files.
for (int v=0; v < 100; v++) {
outA.write(v);
outB.write(v);
}
outA.close();
// The second file receives another 100 bytes after the first closes.
for (int v=100; v < 200; v++) {
outB.write(v);
}
outB.close();
// Read both back and verify every byte.
FSDataInputStream inA=fs.open(pathA);
FSDataInputStream inB=fs.open(pathB);
for (int v=0; v < 100; v++) {
assertEquals(v,inA.read());
}
for (int v=0; v < 200; v++) {
assertEquals(v,inB.read());
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifierEqualityVerifier
/**
 * Test that server default values can be retrieved on the client side via
 * FileSystem#getServerDefaults().
 */
@Test public void testServerDefaults() throws IOException {
// Configure the server side with known values...
Configuration conf=new HdfsConfiguration();
conf.setLong(DFS_BLOCK_SIZE_KEY,DFS_BLOCK_SIZE_DEFAULT);
conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY,DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
conf.setInt(DFS_REPLICATION_KEY,DFS_REPLICATION_DEFAULT + 1);
conf.setInt(IO_FILE_BUFFER_SIZE_KEY,IO_FILE_BUFFER_SIZE_DEFAULT);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
try {
// ...and check each value comes back through getServerDefaults().
FsServerDefaults defaults=fs.getServerDefaults();
assertEquals(DFS_BLOCK_SIZE_DEFAULT,defaults.getBlockSize());
assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT,defaults.getBytesPerChecksum());
assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT,defaults.getWritePacketSize());
assertEquals(DFS_REPLICATION_DEFAULT + 1,defaults.getReplication());
assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT,defaults.getFileBufferSize());
}
finally {
fs.close();
cluster.shutdown();
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests the file length when we hsync the file, restart the cluster, and
 * the DataNodes have not reported to the NameNode yet: the hsynced length
 * must be visible after a normal restart, and opening the file must fail
 * while the NameNode is in safe mode with no registered DataNodes.
 */
@Test(timeout=60000) public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
HdfsDataInputStream in=null;
try {
Path path=new Path("/tmp/TestFileLengthOnClusterRestart","test");
DistributedFileSystem dfs=cluster.getFileSystem();
// Write just over two 512-byte blocks and hsync without closing.
FSDataOutputStream out=dfs.create(path);
int fileLength=1030;
out.write(new byte[fileLength]);
out.hsync();
// After a NameNode restart with DataNodes still up, the hsynced length
// must be visible to readers.
cluster.restartNameNode();
cluster.waitActive();
in=(HdfsDataInputStream)dfs.open(path,1024);
Assert.assertEquals(fileLength,in.getVisibleLength());
// Restart the NameNode with no DataNodes registered: it stays in safe
// mode and opening the file must fail.
cluster.shutdownDataNodes();
cluster.restartNameNode(false);
verifyNNIsInSafeMode(dfs);
try {
in=(HdfsDataInputStream)dfs.open(path);
Assert.fail("Expected IOException");
}
catch ( IOException e) {
Assert.assertTrue(e.getLocalizedMessage().contains("Name node is in safe mode"));
}
}
finally {
if (null != in) {
in.close();
}
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestFileStatus
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the FileStatus obtained calling listStatus on a file: it must
 * return exactly one non-directory entry with the expected size, layout,
 * and fully-qualified path, and the FileContext view must agree.
 */
@Test public void testListStatusOnFile() throws IOException {
FileStatus[] stats=fs.listStatus(file1);
assertEquals(1,stats.length);
FileStatus status=stats[0];
assertFalse(file1 + " should be a file",status.isDirectory());
assertEquals(blockSize,status.getBlockSize());
assertEquals(1,status.getReplication());
assertEquals(fileSize,status.getLen());
assertEquals(file1.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString());
// The FileContext view must agree with the FileSystem view.
RemoteIterator<FileStatus> itor=fc.listStatus(file1);
status=itor.next();
assertEquals(stats[0],status);
assertFalse(file1 + " should be a file",status.isDirectory());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the FileStatus obtained calling getFileStatus on a file.
 */
@Test public void testGetFileStatusOnFile() throws Exception {
checkFile(fs,file1,1);
FileStatus stat=fs.getFileStatus(file1);
// A regular file: not a directory, with the expected size and layout.
assertFalse(file1 + " should be a file",stat.isDirectory());
assertEquals(blockSize,stat.getBlockSize());
assertEquals(1,stat.getReplication());
assertEquals(fileSize,stat.getLen());
// The returned path must be fully qualified against this filesystem.
assertEquals(file1.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),stat.getPath().toString());
}
UtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test getting a FileStatus object using a non-existent path: listStatus
 * (via both FileSystem and FileContext) and getFileStatus must all throw
 * FileNotFoundException with a meaningful message.
 */
@Test public void testGetFileStatusOnNonExistantFileDir() throws IOException {
Path dir=new Path("/test/mkdirs");
// listStatus via FileSystem.
try {
fs.listStatus(dir);
fail("listStatus of non-existent path should fail");
}
catch ( FileNotFoundException expected) {
assertEquals("File " + dir + " does not exist.",expected.getMessage());
}
// listStatus via FileContext.
try {
fc.listStatus(dir);
fail("listStatus of non-existent path should fail");
}
catch ( FileNotFoundException expected) {
assertEquals("File " + dir + " does not exist.",expected.getMessage());
}
// getFileStatus via FileSystem.
try {
fs.getFileStatus(dir);
fail("getFileStatus of non-existent path should fail");
}
catch ( FileNotFoundException expected) {
assertTrue("Exception doesn't indicate non-existant path",expected.getMessage().startsWith("File does not exist"));
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test calling getFileInfo directly on the client: missing files yield
 * null, child counts are reported for directories and files, and
 * non-absolute paths are rejected.
 */
@Test public void testGetFileInfo() throws IOException {
// The root must exist and be a directory.
Path root=new Path("/");
assertTrue("/ should be a directory",fs.getFileStatus(root).isDirectory());
// A missing file yields null rather than an exception.
HdfsFileStatus info=dfsClient.getFileInfo("/noSuchFile");
assertEquals("Non-existant file should result in null",null,info);
// Build /name1/name2 and check child counts at both levels.
Path parent=new Path("/name1");
Path child=new Path("/name1/name2");
assertTrue(fs.mkdirs(parent));
FSDataOutputStream out=fs.create(child,false);
out.close();
info=dfsClient.getFileInfo(parent.toString());
assertEquals(1,info.getChildrenNum());
info=dfsClient.getFileInfo(child.toString());
assertEquals(0,info.getChildrenNum());
// Relative paths are rejected by the NameNode.
try {
dfsClient.getFileInfo("non-absolute");
fail("getFileInfo for a non-absolute path did not throw IOException");
}
catch ( RemoteException re) {
assertTrue("Wrong exception for invalid file name",re.toString().contains("Invalid file name"));
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test FileStatus objects obtained from a directory.
 *
 * Incrementally populates /test/mkdirs with two quarter-block files and
 * three sub-directories, checking after each step that getFileStatus,
 * listStatus (both FileSystem and FileContext) and getContentSummary agree
 * on type, size and listing order.  Listings are expected sorted by name:
 * dir3/dir4/dir5 before filestatus2.dat/filestatus3.dat.
 */
@Test public void testGetFileStatusOnDir() throws Exception {
Path dir=new Path("/test/mkdirs");
assertTrue("mkdir failed",fs.mkdirs(dir));
assertTrue("mkdir failed",fs.exists(dir));
// Fresh directory: directory flag set, zero length, fully-qualified path.
FileStatus status=fs.getFileStatus(dir);
assertTrue(dir + " should be a directory",status.isDirectory());
assertTrue(dir + " should be zero size ",status.getLen() == 0);
assertEquals(dir.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString());
// Empty directory: no entries from either listing API, zero summary length.
FileStatus[] stats=fs.listStatus(dir);
assertEquals(dir + " should be empty",0,stats.length);
assertEquals(dir + " should be zero size ",0,fs.getContentSummary(dir).getLength());
RemoteIterator itor=fc.listStatus(dir);
assertFalse(dir + " should be empty",itor.hasNext());
// First file: a quarter of a block; verify block size, replication, path.
Path file2=new Path(dir,"filestatus2.dat");
DFSTestUtil.createFile(fs,file2,blockSize / 4,blockSize / 4,blockSize,(short)1,seed);
checkFile(fs,file2,1);
status=fs.getFileStatus(file2);
assertEquals(blockSize,status.getBlockSize());
assertEquals(1,status.getReplication());
file2=fs.makeQualified(file2);
assertEquals(file2.toString(),status.getPath().toString());
// Second quarter-block file; the directory now sums to half a block.
Path file3=new Path(dir,"filestatus3.dat");
DFSTestUtil.createFile(fs,file3,blockSize / 4,blockSize / 4,blockSize,(short)1,seed);
checkFile(fs,file3,1);
file3=fs.makeQualified(file3);
final int expected=blockSize / 2;
assertEquals(dir + " size should be " + expected,expected,fs.getContentSummary(dir).getLength());
// Both listing APIs return the two files, in name order.
stats=fs.listStatus(dir);
assertEquals(dir + " should have two entries",2,stats.length);
assertEquals(file2.toString(),stats[0].getPath().toString());
assertEquals(file3.toString(),stats[1].getPath().toString());
itor=fc.listStatus(dir);
assertEquals(file2.toString(),itor.next().getPath().toString());
assertEquals(file3.toString(),itor.next().getPath().toString());
assertFalse("Unexpected addtional file",itor.hasNext());
// Add a sub-directory; it sorts before the files in the listing.
Path dir3=fs.makeQualified(new Path(dir,"dir3"));
fs.mkdirs(dir3);
dir3=fs.makeQualified(dir3);
stats=fs.listStatus(dir);
assertEquals(dir + " should have three entries",3,stats.length);
assertEquals(dir3.toString(),stats[0].getPath().toString());
assertEquals(file2.toString(),stats[1].getPath().toString());
assertEquals(file3.toString(),stats[2].getPath().toString());
itor=fc.listStatus(dir);
assertEquals(dir3.toString(),itor.next().getPath().toString());
assertEquals(file2.toString(),itor.next().getPath().toString());
assertEquals(file3.toString(),itor.next().getPath().toString());
assertFalse("Unexpected addtional file",itor.hasNext());
// Two more sub-directories; all three directories precede the files.
Path dir4=fs.makeQualified(new Path(dir,"dir4"));
fs.mkdirs(dir4);
dir4=fs.makeQualified(dir4);
Path dir5=fs.makeQualified(new Path(dir,"dir5"));
fs.mkdirs(dir5);
dir5=fs.makeQualified(dir5);
stats=fs.listStatus(dir);
assertEquals(dir + " should have five entries",5,stats.length);
assertEquals(dir3.toString(),stats[0].getPath().toString());
assertEquals(dir4.toString(),stats[1].getPath().toString());
assertEquals(dir5.toString(),stats[2].getPath().toString());
assertEquals(file2.toString(),stats[3].getPath().toString());
assertEquals(file3.toString(),stats[4].getPath().toString());
itor=fc.listStatus(dir);
assertEquals(dir3.toString(),itor.next().getPath().toString());
assertEquals(dir4.toString(),itor.next().getPath().toString());
assertEquals(dir5.toString(),itor.next().getPath().toString());
assertEquals(file2.toString(),itor.next().getPath().toString());
assertEquals(file3.toString(),itor.next().getPath().toString());
assertFalse(itor.hasNext());
// Clean up so later tests start from an empty namespace.
fs.delete(dir,true);
}
Class: org.apache.hadoop.hdfs.TestGetBlocks
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * test getBlocks
 *
 * Writes a two-block file on a two-datanode cluster, then queries
 * NamenodeProtocol#getBlocks with several size limits and verifies the
 * number of blocks and storage locations returned.  Also verifies that a
 * size of 0 or -1 and an unregistered datanode are rejected.
 */
@Test public void testGetBlocks() throws Exception {
  final Configuration CONF=new HdfsConfiguration();
  final short REPLICATION_FACTOR=(short)2;
  final int DEFAULT_BLOCK_SIZE=1024;
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
  try {
    cluster.waitActive();
    // Exactly two full blocks so getBlocks has more than one block to return.
    long fileLen=2 * DEFAULT_BLOCK_SIZE;
    DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/tmp.txt"),fileLen,REPLICATION_FACTOR,0L);
    // Poll until every block reports the full replication factor; the write
    // pipeline may still be completing right after createFile returns.
    List<LocatedBlock> locatedBlocks;
    DatanodeInfo[] dataNodes=null;
    boolean notWritten;
    do {
      final DFSClient dfsclient=new DFSClient(NameNode.getAddress(CONF),CONF);
      locatedBlocks=dfsclient.getNamenode().getBlockLocations("/tmp.txt",0,fileLen).getLocatedBlocks();
      assertEquals(2,locatedBlocks.size());
      notWritten=false;
      for (int i=0; i < 2; i++) {
        dataNodes=locatedBlocks.get(i).getLocations();
        if (dataNodes.length != REPLICATION_FACTOR) {
          notWritten=true;
          try {
            Thread.sleep(10);
          }
          catch (InterruptedException e) {
            // Preserve the interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
          }
          break;
        }
      }
    }
    while (notWritten);
    // Query the NameNode directly, as the Balancer does.
    InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
    NamenodeProtocol namenode=NameNodeProxies.createProxy(CONF,NameNode.getUri(addr),NamenodeProtocol.class).getProxy();
    BlockWithLocations[] locs;
    // A size covering the whole file returns both blocks, each on 2 storages.
    locs=namenode.getBlocks(dataNodes[0],fileLen).getBlocks();
    assertEquals(2,locs.length);
    assertEquals(2,locs[0].getStorageIDs().length);
    assertEquals(2,locs[1].getStorageIDs().length);
    // A size of exactly one block returns a single block.
    locs=namenode.getBlocks(dataNodes[0],DEFAULT_BLOCK_SIZE).getBlocks();
    assertEquals(1,locs.length);
    assertEquals(2,locs[0].getStorageIDs().length);
    // Any positive size returns at least one block.
    locs=namenode.getBlocks(dataNodes[0],1).getBlocks();
    assertEquals(1,locs.length);
    assertEquals(2,locs[0].getStorageIDs().length);
    // Non-positive sizes are illegal.
    getBlocksWithException(namenode,dataNodes[0],0);
    getBlocksWithException(namenode,dataNodes[0],-1);
    // An unregistered datanode is rejected regardless of size.
    DatanodeInfo info=DFSTestUtil.getDatanodeInfo("1.2.3.4");
    getBlocksWithException(namenode,info,2);
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifierIterativeVerifierEqualityVerifier
/**
 * Verifies that Block equality/hashing are keyed on the block id alone:
 * blocks stored in a map under one generation stamp must be found again
 * when looked up with GRANDFATHER_GENERATION_STAMP.
 */
@Test public void testBlockKey(){
  // Typed map restored; the raw Map could not hand back a Long below.
  Map<Block,Long> map=new HashMap<Block,Long>();
  final Random RAN=new Random();
  final long seed=RAN.nextLong();
  // Log the seed so a failing run can be reproduced.
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);
  long[] blkids=new long[10];
  for (int i=0; i < blkids.length; i++) {
    blkids[i]=1000L + RAN.nextInt(100000);
    map.put(new Block(blkids[i],0,blkids[i]),blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",","\n "));
  for (int i=0; i < blkids.length; i++) {
    // Same id, different generation stamp: lookup must still succeed.
    Block b=new Block(blkids[i],0,GenerationStamp.GRANDFATHER_GENERATION_STAMP);
    Long v=map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i],v.longValue());
  }
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test if the datanodes returned by{@link ClientProtocol#getBlockLocations(String,long,long)} is correct
 * when stale nodes checking is enabled. Also test during the scenario when 1)
 * stale nodes checking is enabled, 2) a writing is going on, 3) a datanode
 * becomes stale happen simultaneously
 * @throws Exception
 */
@Test public void testReadSelectNonStaleDatanode() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
// Enable stale-node avoidance for reads, with a 30-minute staleness window
// so nodes only become stale when this test back-dates their heartbeats.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,true);
long staleInterval=30 * 1000 * 60;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,staleInterval);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).racks(racks).build();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
List nodeInfoList=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals("Unexpected number of datanodes",numDatanodes,nodeInfoList.size());
FileSystem fileSys=cluster.getFileSystem();
FSDataOutputStream stm=null;
try {
final Path fileName=new Path("/file1");
// Write 1.5 blocks and hflush, leaving the file under construction.
stm=fileSys.create(fileName,true,fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096),(short)3,blockSize);
stm.write(new byte[(blockSize * 3) / 2]);
stm.hflush();
LocatedBlocks blocks=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
DatanodeInfo[] nodes=blocks.get(0).getLocations();
assertEquals(nodes.length,3);
DataNode staleNode=null;
DatanodeDescriptor staleNodeInfo=null;
// Stop heartbeats on the first replica's node and back-date its last
// update so the NameNode considers it stale.
staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
assertNotNull(staleNode);
staleNodeInfo=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId());
staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
LocatedBlocks blocksAfterStale=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
DatanodeInfo[] nodesAfterStale=blocksAfterStale.get(0).getLocations();
assertEquals(nodesAfterStale.length,3);
// The stale node must have been sorted to the end of the location list.
assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName());
// Restore the node, then repeat the exercise on the file's last
// (under-construction) block.
DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode,false);
staleNodeInfo.setLastUpdate(Time.now());
LocatedBlock lastBlock=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
nodes=lastBlock.getLocations();
assertEquals(nodes.length,3);
staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
assertNotNull(staleNode);
cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
LocatedBlock lastBlockAfterStale=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
nodesAfterStale=lastBlockAfterStale.getLocations();
assertEquals(nodesAfterStale.length,3);
// Again the stale node must be last, even mid-write.
assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName());
}
finally {
if (stm != null) {
stm.close();
}
if (client != null) {
client.close();
}
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestHFlush
InternalCallVerifierEqualityVerifier
/**
 * Test hsync (with updating block length in NameNode) while no data is
 * actually written yet: the NameNode must report a file length of zero.
 */
@Test public void hSyncUpdateLength_00() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem fileSystem=cluster.getFileSystem();
  try {
    final Path filePath=new Path(fName);
    FSDataOutputStream out=fileSystem.create(filePath,true,4096,(short)2,AppendTestUtil.BLOCK_SIZE);
    System.out.println("Created file " + filePath.toString());
    // hsync with UPDATE_LENGTH pushes the (still-zero) length to the NameNode.
    ((DFSOutputStream)out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    final long reportedLength=fileSystem.getFileStatus(filePath).getLen();
    assertEquals(0L,reportedLength);
    out.close();
  }
  finally {
    fileSystem.close();
    cluster.shutdown();
  }
}
Class: org.apache.hadoop.hdfs.TestHdfsAdmin
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that we can set and clear quotas via {@link HdfsAdmin}: namespace
 * and space quotas are independent, default to -1 (unset), take effect
 * when set, and revert to -1 when cleared.
 */
@Test public void testHdfsAdminSetQuota() throws Exception {
  HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
  FileSystem fileSystem=null;
  try {
    fileSystem=FileSystem.get(conf);
    assertTrue(fileSystem.mkdirs(TEST_PATH));
    // A fresh directory has neither quota set.
    assertEquals(-1,fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1,fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());
    // Setting the space quota leaves the namespace quota untouched.
    dfsAdmin.setSpaceQuota(TEST_PATH,10);
    assertEquals(-1,fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10,fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());
    // Setting the namespace quota leaves the space quota untouched.
    dfsAdmin.setQuota(TEST_PATH,10);
    assertEquals(10,fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10,fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());
    // Quotas can also be cleared one at a time.
    dfsAdmin.clearSpaceQuota(TEST_PATH);
    assertEquals(10,fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1,fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());
    dfsAdmin.clearQuota(TEST_PATH);
    assertEquals(-1,fileSystem.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1,fileSystem.getContentSummary(TEST_PATH).getSpaceQuota());
  }
  finally {
    if (fileSystem != null) {
      fileSystem.close();
    }
  }
}
Class: org.apache.hadoop.hdfs.TestLease
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Verifies that DFSClients share one LeaseRenewer per (user, NameNode) pair:
// clients created for the same UGI must return the same renewer instance,
// while clients for different UGIs must not.
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception {
final String[] groups=new String[]{"supergroup"};
final UserGroupInformation[] ugi=new UserGroupInformation[3];
for (int i=0; i < ugi.length; i++) {
ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups);
}
// Stub the mock NameNode so getFileInfo and create succeed for any path.
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList());
final Configuration conf=new Configuration();
// Two clients as user0 share a single renewer.
final DFSClient c1=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out1=createFsOut(c1,"/out1");
final DFSClient c2=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out2=createFsOut(c2,"/out2");
Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer());
// A client as user1 gets a different renewer instance...
final DFSClient c3=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out3=createFsOut(c3,"/out3");
Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
// ...which is shared with a second user1 client.
final DFSClient c4=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out4=createFsOut(c4,"/out4");
Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer());
// user2's renewer is distinct from both user0's and user1's.
final DFSClient c5=createDFSClientAs(ugi[2],conf);
FSDataOutputStream out5=createFsOut(c5,"/out5");
Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that we can open up a file for write, move it to another location,
 * and then create a new file in the previous location, without causing any
 * lease conflicts. This is possible because we now use unique inode IDs
 * to identify files to the NameNode.
 */
@Test public void testLeaseAfterRenameAndRecreate() throws Exception {
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final Path path1=new Path("/test-file");
    final String contents1="contents1";
    final Path path2=new Path("/test-file-new-location");
    final String contents2="contents2";
    FileSystem fs=cluster.getFileSystem();
    // Open the original file and leave it open: a lease is held on its inode.
    FSDataOutputStream firstStream=fs.create(path1);
    firstStream.writeBytes(contents1);
    Assert.assertTrue(hasLease(cluster,path1));
    Assert.assertEquals(1,leaseCount(cluster));
    // From a second client, move the still-open file aside and create a new
    // file at the old location.
    DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
    fs2.rename(path1,path2);
    FSDataOutputStream secondStream=fs2.create(path1);
    secondStream.writeBytes(contents2);
    secondStream.close();
    // The original lease followed the inode to its new path.
    Assert.assertTrue(hasLease(cluster,path2));
    firstStream.close();
    // The original contents ended up at path2, the new ones at path1.
    DistributedFileSystem fs3=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
    Assert.assertEquals(contents1,DFSTestUtil.readFile(fs3,path2));
    Assert.assertEquals(contents2,DFSTestUtil.readFile(fs3,path1));
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that a lease on an open file follows the file through renames:
 * renaming the file itself, renaming its parent directory, and renaming
 * with {@link Options.Rename#OVERWRITE} must each leave exactly one lease,
 * attached to the file's current path.
 */
@Test public void testLeaseAfterRename() throws Exception {
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    Path p=new Path("/test-file");
    Path d=new Path("/test-d");
    Path d2=new Path("/test-d-other");
    // Open the file and keep it open so a lease exists throughout the test.
    FileSystem fs=cluster.getFileSystem();
    FSDataOutputStream out=fs.create(p);
    out.writeBytes("something");
    Assert.assertTrue(hasLease(cluster,p));
    Assert.assertEquals(1,leaseCount(cluster));
    DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
    LOG.info("DMS: rename file into dir");
    Path pRenamed=new Path(d,p.getName());
    fs2.mkdirs(d);
    fs2.rename(p,pRenamed);
    Assert.assertFalse(p + " exists",fs2.exists(p));
    Assert.assertTrue(pRenamed + " not found",fs2.exists(pRenamed));
    Assert.assertFalse("has lease for " + p,hasLease(cluster,p));
    Assert.assertTrue("no lease for " + pRenamed,hasLease(cluster,pRenamed));
    Assert.assertEquals(1,leaseCount(cluster));
    LOG.info("DMS: rename parent dir");
    Path pRenamedAgain=new Path(d2,pRenamed.getName());
    fs2.rename(d,d2);
    Assert.assertFalse(d + " exists",fs2.exists(d));
    Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
    Assert.assertTrue(d2 + " not found",fs2.exists(d2));
    Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
    Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
    Assert.assertEquals(1,leaseCount(cluster));
    LOG.info("DMS: rename parent again");
    pRenamed=pRenamedAgain;
    pRenamedAgain=new Path(new Path(d,d2.getName()),p.getName());
    fs2.mkdirs(d);
    fs2.rename(d2,d);
    Assert.assertFalse(d2 + " exists",fs2.exists(d2));
    // Message fixed: this assertFalse previously said "no lease for",
    // contradicting the matching "has lease for" assertions above and below.
    Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
    Assert.assertTrue(d + " not found",fs2.exists(d));
    Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
    Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
    Assert.assertEquals(1,leaseCount(cluster));
    // Rename the parent onto an existing destination with OVERWRITE.
    pRenamed=pRenamedAgain;
    pRenamedAgain=new Path(d2,p.getName());
    fs2.rename(pRenamed.getParent(),d2,Options.Rename.OVERWRITE);
    // Message fixed: assertFalse verifies the old parent is gone, so the
    // failure text is " exists", not " not found".
    Assert.assertFalse(pRenamed.getParent() + " exists",fs2.exists(pRenamed.getParent()));
    Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
    Assert.assertTrue(d2 + " not found",fs2.exists(d2));
    Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
    Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
    Assert.assertEquals(1,leaseCount(cluster));
    // And once more, overwriting back in the other direction.
    pRenamed=pRenamedAgain;
    pRenamedAgain=new Path(d,p.getName());
    fs2.rename(pRenamed.getParent(),d,Options.Rename.OVERWRITE);
    Assert.assertFalse(pRenamed.getParent() + " exists",fs2.exists(pRenamed.getParent()));
    Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
    Assert.assertTrue(d + " not found",fs2.exists(d));
    Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
    Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
    Assert.assertEquals(1,leaseCount(cluster));
    out.close();
  }
  finally {
    cluster.shutdown();
  }
}
Class: org.apache.hadoop.hdfs.TestLeaseRecovery
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored block.
 */
@Test public void testBlockSynchronization() throws Exception {
final int ORG_FILE_SIZE=3000;
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive();
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,ORG_FILE_SIZE,REPLICATION_NUM,0L);
assertTrue(dfs.exists(filepath));
DFSTestUtil.waitReplication(dfs,filepath,REPLICATION_NUM);
// Resolve each replica location of the last block to its DataNode.
LocatedBlock locatedblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr);
DatanodeInfo[] datanodeinfos=locatedblock.getLocations();
assertEquals(REPLICATION_NUM,datanodeinfos.length);
DataNode[] datanodes=new DataNode[REPLICATION_NUM];
for (int i=0; i < REPLICATION_NUM; i++) {
datanodes[i]=cluster.getDataNode(datanodeinfos[i].getIpcPort());
assertTrue(datanodes[i] != null);
}
ExtendedBlock lastblock=locatedblock.getBlock();
DataNode.LOG.info("newblocks=" + lastblock);
for (int i=0; i < REPLICATION_NUM; i++) {
checkMetaInfo(lastblock,datanodes[i]);
}
// Reopen for append (without writing), then wait for lease recovery to
// run block synchronization on the last block.
DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
cluster.getNameNodeRpc().append(filestr,dfs.dfs.clientName);
waitLeaseRecovery(cluster);
// After recovery, every stored replica must agree on block id, length
// and the new generation stamp.
Block[] updatedmetainfo=new Block[REPLICATION_NUM];
long oldSize=lastblock.getNumBytes();
lastblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr).getBlock();
long currentGS=lastblock.getGenerationStamp();
for (int i=0; i < REPLICATION_NUM; i++) {
updatedmetainfo[i]=DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(),lastblock.getBlockId());
assertEquals(lastblock.getBlockId(),updatedmetainfo[i].getBlockId());
assertEquals(oldSize,updatedmetainfo[i].getNumBytes());
assertEquals(currentGS,updatedmetainfo[i].getGenerationStamp());
}
// While the NameNode is in safe mode, lease recovery must not proceed:
// the lease for the newly created file stays in place.
System.out.println("Testing that lease recovery cannot happen during safemode.");
filestr="/foo.safemode";
filepath=new Path(filestr);
dfs.create(filepath,(short)1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,false);
assertTrue(dfs.dfs.exists(filestr));
DFSTestUtil.waitReplication(dfs,filepath,(short)1);
waitLeaseRecovery(cluster);
LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
assertTrue("Found " + lm.countLease() + " lease, expected 1",lm.countLease() == 1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Class: org.apache.hadoop.hdfs.TestLeaseRecovery2
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the hard lease expiration period to be short 1s. Thus triggering
 * lease expiration to happen while the client is still alive.
 * The test makes sure that the lease recovery completes and the client
 * fails if it continues to write to the file.
 * @throws Exception
 */
@Test public void testHardLeaseRecovery() throws Exception {
String filestr="/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath=new Path(filestr);
FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// Write a random amount of data and hflush so it reaches the pipeline.
int size=AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer,0,size);
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// Kill lease renewal on the client and shrink the hard limit so the
// NameNode expires the lease while the client still holds the stream open.
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD);
// Poll until recovery finishes and the file is no longer under construction.
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size);
}
while (locatedBlocks.isUnderConstruction());
assertEquals(size,locatedBlocks.getFileLength());
// The original writer's lease is gone, so further writes must fail.
try {
stm.write('b');
stm.close();
fail("Writer thread should have been killed");
}
catch ( IOException e) {
e.printStackTrace();
}
AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
Class: org.apache.hadoop.hdfs.TestLeaseRenewer
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the renewer daemon lifecycle and its thread name: it starts when
 * the first file is registered, is named "LeaseRenewer:&lt;user&gt;@&lt;nn-uri&gt;",
 * and stops shortly after the last file is closed and the empty-time is set.
 */
@Test public void testThreadName() throws Exception {
  final DFSOutputStream stream=Mockito.mock(DFSOutputStream.class);
  final long fileId=789L;
  Assert.assertFalse("Renewer not initially running",renewer.isRunning());
  // Registering the first file starts the daemon thread.
  renewer.put(fileId,stream,MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running",renewer.isRunning());
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/",renewer.getDaemonName());
  // Drop the only file and force the empty-time so the daemon may stop now.
  renewer.closeFile(fileId,MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.now());
  // Give the daemon up to five seconds to notice and exit.
  final long deadline=Time.now() + 5000;
  while (renewer.isRunning() && Time.now() < deadline) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Bring up two clusters and assert that they are in different directories.
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testDualClusters() throws Throwable {
  final File baseDir2=new File(testDataPath,CLUSTER_2);
  final File baseDir3=new File(testDataPath,CLUSTER_3);
  Configuration conf=new HdfsConfiguration();
  final String c2Path=baseDir2.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,c2Path);
  MiniDFSCluster cluster2=new MiniDFSCluster.Builder(conf).build();
  MiniDFSCluster cluster3=null;
  try {
    final String dataDir2=cluster2.getDataDirectory();
    assertEquals(new File(c2Path + "/data"),new File(dataDir2));
    // Repoint the shared conf at a different base dir before building the
    // second cluster.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,baseDir3.getAbsolutePath());
    cluster3=new MiniDFSCluster.Builder(conf).build();
    final String dataDir3=cluster3.getDataDirectory();
    assertTrue("Clusters are bound to the same directory: " + dataDir2,!dataDir2.equals(dataDir3));
  }
  finally {
    MiniDFSCluster.shutdownCluster(cluster3);
    MiniDFSCluster.shutdownCluster(cluster2);
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Verify that without system properties the cluster still comes up, provided
 * the configuration is set
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testClusterWithoutSystemProperties() throws Throwable {
  // Remove the system-property escape hatch so only the conf key matters.
  System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
  Configuration conf=new HdfsConfiguration();
  final String c1Path=new File(testDataPath,CLUSTER_1).getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,c1Path);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    // The data directory must live under the configured base dir.
    assertEquals(new File(c1Path + "/data"),new File(cluster.getDataDirectory()));
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * MiniDFSCluster should not clobber dfs.datanode.hostname if requested.
 * Only runs on Linux (skipped elsewhere via assumeTrue).
 */
@Test(timeout=100000) public void testClusterSetDatanodeHostname() throws Throwable {
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  Configuration conf=new HdfsConfiguration();
  // Pre-set a fake datanode hostname that the cluster must preserve.
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,"MYHOST");
  final File baseDir=new File(testDataPath,CLUSTER_5);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,baseDir.getAbsolutePath());
  MiniDFSCluster cluster5=new MiniDFSCluster.Builder(conf).numDataNodes(1).checkDataNodeHostConfig(true).build();
  try {
    assertEquals("DataNode hostname config not respected","MYHOST",cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
  }
  finally {
    MiniDFSCluster.shutdownCluster(cluster5);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Creates and closes a file of certain length.
 * Calls append to allow next write() operation to add to the end of it
 * After write() invocation, calls hflush() to make sure that data sunk through
 * the pipeline and check the state of the last block's replica.
 * It supposes to be in RBW state
 * @throws IOException in case of an error
 */
@Test public void pipeline_01() throws IOException {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  FSDataOutputStream ofs=fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream)ofs.getWrappedStream()).hflush();
  // Typed list restored; the raw List could not reach getBlock() below.
  List<LocatedBlock> lb=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_SIZE - 1,FILE_SIZE).getLocatedBlocks();
  String bpid=cluster.getNamesystem().getBlockPoolId();
  for (DataNode dn : cluster.getDataNodes()) {
    Replica r=DataNodeTestUtils.fetchReplicaInfo(dn,bpid,lb.get(0).getBlock().getBlockId());
    // After append()/write()/hflush() every datanode must hold the replica
    // in RBW (replica-being-written) state.
    assertTrue("Replica on DN " + dn + " shouldn't be null",r != null);
    assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",HdfsServerConstants.ReplicaState.RBW,r.getState());
  }
  ofs.close();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Like the previous test but create many files. This covers bugs where
 * the quota adjustment is incorrect but it takes many files to accrue
 * a big enough accounting error to violate the quota.
 */
@Test public void testMultipleFilesSmallerThanOneBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
final int BLOCK_SIZE=6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
// Tiny limit so getContentSummary yields while traversing (asserted below).
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(conf);
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf);
try {
long nsQuota=FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem());
assertTrue("Default namespace quota expected as long max. But the value is :" + nsQuota,nsQuota == Long.MAX_VALUE);
Path dir=new Path("/test");
boolean exceededQuota=false;
ContentSummary c;
// Space quota of 32 blocks (192 KB); each 1 KB file consumes actual bytes
// times replication (3 KB), not whole blocks.
final int FILE_SIZE=1024;
final int QUOTA_SIZE=32 * (int)fs.getDefaultBlockSize(dir);
assertEquals(6 * 1024,fs.getDefaultBlockSize(dir));
assertEquals(192 * 1024,QUOTA_SIZE);
assertTrue(fs.mkdirs(dir));
runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString());
// 59 files fit: 59 * 1024 * 3 = 177 KB of the 192 KB quota.
for (int i=0; i < 59; i++) {
Path file=new Path("/test/test" + i);
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
// The plain and WebHDFS content summaries must agree.
c=fs.getContentSummary(dir);
checkContentSummary(c,webhdfs.getContentSummary(dir));
assertEquals("Invalid space consumed",59 * FILE_SIZE * 3,c.getSpaceConsumed());
// NOTE(review): this compares two derived constants (remaining quota vs
// 3 * (blockSize - FILE_SIZE); both 15360 here) rather than a value read
// from the cluster -- presumably documenting that less than one more
// replicated block's worth of quota remains; confirm intent.
assertEquals("Invalid space consumed",QUOTA_SIZE - (59 * FILE_SIZE * 3),3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
// The 60th file must push usage over the space quota.
try {
Path file=new Path("/test/test59");
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
catch ( QuotaExceededException e) {
exceededQuota=true;
}
assertTrue("Quota not exceeded",exceededQuota);
// With the content-summary limit set to 2 above, the summary computation
// is expected to have yielded the lock exactly twice.
assertEquals(2,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Rolling-upgrade rollback with quorum-journal edits: prepare a rolling
 * upgrade, make a post-prepare change (mkdir /bar), then restart the
 * NameNode with "-rollingUpgrade rollback" and verify the change is gone
 * while the pre-upgrade state (/foo) survives.  Afterwards the storage of
 * every journal node is checked.
 */
@Test public void testRollbackWithQJM() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  MiniJournalCluster mjc=null;
  MiniDFSCluster cluster=null;
  final Path foo=new Path("/foo");
  final Path bar=new Path("/bar");
  try {
    mjc=new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI(JOURNAL_ID).toString());
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem dfs=cluster.getFileSystem();
    final DFSAdmin dfsadmin=new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // Prepare the rolling upgrade (requires safe mode), leave safe mode,
    // then make a change that the rollback must undo.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"}));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    dfs.mkdirs(bar);
    dfs.close();
    cluster.restartNameNode("-rollingUpgrade","rollback");
    dfs=cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));
    // Check every journal node's storage.  Bug fix: the original passed a
    // hard-coded 0 instead of the loop index, inspecting node 0 repeatedly.
    for (int i=0; i < NUM_JOURNAL_NODES; i++) {
      File dir=mjc.getCurrentDir(i,JOURNAL_ID);
      checkJNStorage(dir,4,7);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}
Class: org.apache.hadoop.hdfs.TestSafeMode
InternalCallVerifierEqualityVerifier
/**
 * Test that, if there are no blocks in the filesystem,
 * the NameNode doesn't enter the "safemode extension" period: the safemode
 * status string must already be empty right after the restart.
 */
@Test(timeout=45000) public void testNoExtensionIfNoBlocks() throws IOException {
  // Configure a long extension that would be noticeable if (wrongly) applied.
  cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,60000);
  cluster.restartNameNode();
  // An empty status string means safemode is already off.
  assertEquals("",cluster.getNameNode().getNamesystem().getSafemode());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476)
 */
@Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception {
LOG.info("Starting testInitializeReplQueuesEarly");
// Disable local-node write preference so blocks spread across DNs.
BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(),false);
cluster.startDataNodes(conf,2,true,StartupOption.REGULAR,null);
cluster.waitActive();
LOG.info("Creating files");
// 15 blocks total, single replica each.
DFSTestUtil.createFile(fs,TEST_PATH,15 * BLOCK_SIZE,(short)1,1L);
LOG.info("Stopping all DataNodes");
List dnprops=Lists.newLinkedList();
// Always stop index 0 — the datanode list shifts down after each stop.
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
dnprops.add(cluster.stopDataNode(0));
// Replication queues should initialize once 1/15 of blocks are safe.
cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,1f / 15f);
LOG.info("Restarting NameNode");
cluster.restartNameNode();
final NameNode nn=cluster.getNameNode();
String status=nn.getNamesystem().getSafemode();
assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. "+ "Safe mode will be turned off automatically once the thresholds "+ "have been reached.",status);
assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed",NameNodeAdapter.safeModeInitializedReplQueues(nn));
LOG.info("Restarting one DataNode");
cluster.restartDataNode(dnprops.remove(0));
// Wait until all of the restarted DN's storage block reports land.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return getLongCounter("StorageBlockReportOps",getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
}
}
,10,10000);
// One DN's report should make some, but not all 15, blocks safe.
final int safe=NameNodeAdapter.getSafeModeSafeBlocks(nn);
assertTrue("Expected first block report to make some blocks safe.",safe > 0);
assertTrue("Did not expect first block report to make all blocks safe.",safe < 15);
// The key assertion: queues are initialized while still in safemode.
assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
long underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks();
// Poll until the under-replication count converges to 15 - safe.
while (underReplicatedBlocks != (15 - safe)) {
LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual="+ underReplicatedBlocks);
Thread.sleep(100);
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks();
}
cluster.restartDataNodes();
}
InternalCallVerifierEqualityVerifier
/**
 * Test that, when under-replicated blocks are processed at the end of
 * safe-mode, blocks currently under construction are not considered
 * under-construction or missing. Regression test for HDFS-2822.
 */
@Test public void testRbwBlocksNotConsideredUnderReplicated() throws IOException {
  final List<FSDataOutputStream> openStreams = Lists.newArrayList();
  try {
    // Some fully-finalized blocks, plus ten files left open so their
    // last blocks remain under construction (RBW).
    DFSTestUtil.createFile(fs, new Path("/junk-blocks"), BLOCK_SIZE * 4, (short) 1, 1L);
    for (int i = 0; i < 10; i++) {
      FSDataOutputStream out =
          fs.create(new Path("/append-" + i), true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
      openStreams.add(out);
      out.write(1);
      out.hflush();
    }
    // Restart so the NN re-processes blocks while leaving safemode.
    cluster.restartNameNode();
    FSNamesystem ns = cluster.getNameNode(0).getNamesystem();
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // RBW blocks must not show up as pending, corrupt, or missing.
    assertEquals(0, ns.getPendingReplicationBlocks());
    assertEquals(0, ns.getCorruptReplicaBlocks());
    assertEquals(0, ns.getMissingBlocksCount());
  }
  finally {
    for (FSDataOutputStream out : openStreams) {
      IOUtils.closeStream(out);
    }
    cluster.shutdown();
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
 * is set to a number greater than the number of live datanodes.
 */
@Test public void testDatanodeThreshold() throws IOException {
  cluster.shutdown();
  Configuration conf = cluster.getConfiguration(0);
  // No extension period, but require at least one live datanode.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
  cluster.restartNameNode();
  fs = cluster.getFileSystem();
  // With zero live DNs the safemode tip should explain the DN shortfall.
  String tipMsg = cluster.getNamesystem().getSafemode();
  assertTrue("Safemode tip message doesn't look right: " + tipMsg,
      tipMsg.contains("The number of live datanodes 0 needs an additional "
          + "1 live datanodes to reach the minimum number 1.\n"
          + "Safe mode will be turned off automatically"));
  // Start a DN; the NN should leave safemode once it registers.
  cluster.startDataNodes(conf, 1, true, null, null);
  try {
    Thread.sleep(1000);
  }
  catch (InterruptedException ignored) {
    // FIX: restore the interrupt status instead of silently swallowing
    // it, so callers up the stack can still observe the interruption.
    Thread.currentThread().interrupt();
  }
  assertEquals("", cluster.getNamesystem().getSafemode());
}
InternalCallVerifierEqualityVerifierExceptionVerifierHybridVerifier
/**
 * Test (expected to throw IOE) for FSDataInpuStream#seek
 * when the position argument is larger than the file size.
 */
@Test(expected=IOException.class) public void testSeekPastFileSize() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs, seekFile, ONEMB, ONEMB,
        fs.getDefaultBlockSize(seekFile), fs.getDefaultReplication(seekFile), seed);
    FSDataInputStream in = fs.open(seekFile);
    // A seek within the 1 MB file succeeds and updates the position...
    in.seek(65536);
    assertEquals(65536, in.getPos());
    // ...while a seek to 3 MB, past EOF, must raise the expected IOException.
    in.seek(3 * ONEMB);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
Class: org.apache.hadoop.hdfs.TestSetTimes
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests mod time change at close in DFS.
 */
@Test public void testTimesAtClose() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
int replicas=1;
// Short idle/heartbeat intervals keep the mini cluster responsive.
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
// Sanity check: all datanodes are live before the test proper begins.
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
assertTrue(fileSys instanceof DistributedFileSystem);
try {
Path file1=new Path("/simple.dat");
FSDataOutputStream stm=writeFile(fileSys,file1,replicas);
System.out.println("Created and wrote file simple.dat");
// Capture the modification time while the file is still open.
FileStatus statBeforeClose=fileSys.getFileStatus(file1);
long mtimeBeforeClose=statBeforeClose.getModificationTime();
String mdateBeforeClose=dateForm.format(new Date(mtimeBeforeClose));
System.out.println("mtime on " + file1 + " before close is "+ mdateBeforeClose+ " ("+ mtimeBeforeClose+ ")");
assertTrue(mtimeBeforeClose != 0);
stm.close();
System.out.println("Closed file.");
// Closing the file must bump the modification time.
FileStatus statAfterClose=fileSys.getFileStatus(file1);
long mtimeAfterClose=statAfterClose.getModificationTime();
String mdateAfterClose=dateForm.format(new Date(mtimeAfterClose));
System.out.println("mtime on " + file1 + " after close is "+ mdateAfterClose+ " ("+ mtimeAfterClose+ ")");
assertTrue(mtimeAfterClose != 0);
assertTrue(mtimeBeforeClose != mtimeAfterClose);
cleanupFile(fileSys,file1);
}
catch ( IOException e) {
// On failure, dump a full datanode report to aid debugging.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests mod & access time in DFS.
 */
@Test public void testTimes() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Remember the NN port so the cluster can be restarted on it later.
final int nnport=cluster.getNameNodePort();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
int replicas=1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1=new Path("testdir1");
Path file1=new Path(dir1,"test1.dat");
FSDataOutputStream stm=writeFile(fileSys,file1,replicas);
FileStatus stat=fileSys.getFileStatus(file1);
// The file gets a nonzero access time even before it is closed.
long atimeBeforeClose=stat.getAccessTime();
String adate=dateForm.format(new Date(atimeBeforeClose));
System.out.println("atime on " + file1 + " before close is "+ adate+ " ("+ atimeBeforeClose+ ")");
assertTrue(atimeBeforeClose != 0);
stm.close();
stat=fileSys.getFileStatus(file1);
long atime1=stat.getAccessTime();
long mtime1=stat.getModificationTime();
adate=dateForm.format(new Date(atime1));
String mdate=dateForm.format(new Date(mtime1));
System.out.println("atime on " + file1 + " is "+ adate+ " ("+ atime1+ ")");
System.out.println("mtime on " + file1 + " is "+ mdate+ " ("+ mtime1+ ")");
assertTrue(atime1 != 0);
// Directories are expected to report a zero access time.
stat=fileSys.getFileStatus(dir1);
long mdir1=stat.getAccessTime();
assertTrue(mdir1 == 0);
// setTimes with mtime=-1 updates only the access time (back one day).
long atime2=atime1 - (24L * 3600L * 1000L);
fileSys.setTimes(file1,-1,atime2);
stat=fileSys.getFileStatus(file1);
long atime3=stat.getAccessTime();
String adate3=dateForm.format(new Date(atime3));
System.out.println("new atime on " + file1 + " is "+ adate3+ " ("+ atime3+ ")");
assertTrue(atime2 == atime3);
assertTrue(mtime1 == stat.getModificationTime());
// setTimes with atime=-1 updates only the modification time.
long mtime2=mtime1 - (3600L * 1000L);
fileSys.setTimes(file1,mtime2,-1);
stat=fileSys.getFileStatus(file1);
long mtime3=stat.getModificationTime();
String mdate3=dateForm.format(new Date(mtime3));
System.out.println("new mtime on " + file1 + " is "+ mdate3+ " ("+ mtime3+ ")");
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime2 == mtime3);
// Both times can be set explicitly on a directory.
long mtime4=Time.now() - (3600L * 1000L);
long atime4=Time.now();
fileSys.setTimes(dir1,mtime4,atime4);
stat=fileSys.getFileStatus(dir1);
assertTrue("Not matching the modification times",mtime4 == stat.getModificationTime());
assertTrue("Not matching the access times",atime4 == stat.getAccessTime());
// setTimes on a non-existent path must raise FileNotFoundException.
Path nonExistingDir=new Path(dir1,"/nonExistingDir/");
try {
fileSys.setTimes(nonExistingDir,mtime4,atime4);
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
}
// Restart the cluster (without reformatting) and verify the times
// were persisted across the restart.
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
System.out.println("Verifying times after cluster restart");
stat=fileSys.getFileStatus(file1);
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime3 == stat.getModificationTime());
cleanupFile(fileSys,file1);
cleanupFile(fileSys,dir1);
}
catch ( IOException e) {
// On failure, dump a full datanode report to aid debugging.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
Class: org.apache.hadoop.hdfs.TestWriteRead
APIUtilityVerifierEqualityVerifier
/**
 * Junit Test reading while writing.
 */
@Test public void testWriteReadSeq() throws IOException {
  // Sequential (non-positional) reads through the FileSystem API.
  useFCOption = false;
  positionReadOption = false;
  final long readStartPos = 0;
  final int status =
      testWriteAndRead(filenameOption, WR_NTIMES, WR_CHUNK_SIZE, readStartPos);
  LOG.info("Summary status from test1: status= " + status);
  Assert.assertEquals(0, status);
}
APIUtilityVerifierEqualityVerifier
/**
 * Junit Test position read while writing.
 */
@Test public void testWriteReadPos() throws IOException {
  // Use positional (pread) calls while the writer is still active.
  positionReadOption = true;
  final long readStartPos = 0;
  final int status =
      testWriteAndRead(filenameOption, WR_NTIMES, WR_CHUNK_SIZE, readStartPos);
  Assert.assertEquals(0, status);
}
APIUtilityVerifierEqualityVerifier
/**
 * Junit Test position read of the current block being written.
 */
@Test public void testReadPosCurrentBlock() throws IOException {
  positionReadOption = true;
  // Write in 1.5-block chunks and start reading just past the first
  // block, so reads land inside the block currently being written.
  final int chunkSize = (int) (blockSize) + (int) (blockSize / 2);
  final long readStartPos = blockSize + 1;
  final int repetitions = 5;
  final int status =
      testWriteAndRead(filenameOption, repetitions, chunkSize, readStartPos);
  Assert.assertEquals(0, status);
}
InternalCallVerifierEqualityVerifier
// Exercises Nfs3Utils.getAccessRightsForUserGroup across owner/group/other
// permission combinations for both regular files and directories. Modes
// are stubbed as decimal values of octal permission bits (448 = 0700,
// 56 = 0070, 7 = 0007, 288 = 0440, 457 = 0711). The expected integers
// (0, 61, 1, 31, 2) are NFS3 access-right bitmasks — exact bit meanings
// are defined in Nfs3Utils, not visible here.
@Test public void testGetAccessRightsForUserGroup() throws IOException {
Nfs3FileAttributes attr=Mockito.mock(Nfs3FileAttributes.class);
// Owner uid=2, mode 0700; caller uid=3 is neither owner nor group.
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(3);
Mockito.when(attr.getMode()).thenReturn(448);
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
assertEquals("No access should be allowed as UID does not match attribute over mode 700",0,Nfs3Utils.getAccessRightsForUserGroup(3,3,null,attr));
// Mode 0070; caller's gid=4 does not match the file's gid=3.
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(3);
Mockito.when(attr.getMode()).thenReturn(56);
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
assertEquals("No access should be allowed as GID does not match attribute over mode 070",0,Nfs3Utils.getAccessRightsForUserGroup(2,4,null,attr));
// Mode 0007; "other" bits grant access even with no uid/gid match.
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(3);
Mockito.when(attr.getMode()).thenReturn(7);
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
assertEquals("Access should be allowed as mode is 007 and UID/GID do not match",61,Nfs3Utils.getAccessRightsForUserGroup(1,4,new int[]{5,6},attr));
// Mode 0440; file gid=10 appears in the caller's auxiliary GID list.
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(10);
Mockito.when(attr.getMode()).thenReturn(288);
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue());
assertEquals("Access should be allowed as mode is 440 and Aux GID does match",1,Nfs3Utils.getAccessRightsForUserGroup(3,4,new int[]{5,16,10},attr));
// Directory cases, mode 0700: only the owning uid=2 gets access; a
// matching primary or auxiliary GID is not sufficient.
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(10);
Mockito.when(attr.getMode()).thenReturn(448);
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
assertEquals("Access should be allowed for dir as mode is 700 and UID does match",31,Nfs3Utils.getAccessRightsForUserGroup(2,4,new int[]{5,16,10},attr));
assertEquals("No access should be allowed for dir as mode is 700 even though GID does match",0,Nfs3Utils.getAccessRightsForUserGroup(3,10,new int[]{5,16,4},attr));
assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",0,Nfs3Utils.getAccessRightsForUserGroup(3,20,new int[]{5,10},attr));
// Directory, mode 0711: the matching primary gid=10 grants access.
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(10);
Mockito.when(attr.getMode()).thenReturn(457);
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
assertEquals("Access should be allowed for dir as mode is 711 and GID matches",2,Nfs3Utils.getAccessRightsForUserGroup(3,10,new int[]{5,16,11},attr));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to make sure NameNode.Feature support previous features
 */
@Test public void testNameNodeFeature(){
  final LayoutFeature rollingUpgrade = NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
  final int lv = rollingUpgrade.getInfo().getLayoutVersion();
  // The first NN-specific feature must support the last common feature...
  assertTrue(NameNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE, lv));
  // ...and its layout version must directly follow the last common version.
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, lv);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to make sure DataNode.Feature support previous features
 */
@Test public void testDataNodeFeature(){
  final LayoutFeature firstLayout = DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
  final int lv = firstLayout.getInfo().getLayoutVersion();
  // The first DN-specific feature must support the last common feature...
  assertTrue(DataNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE, lv));
  // ...and its layout version must directly follow the last common version.
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, lv);
}
APIUtilityVerifierEqualityVerifier
/**
 * Round-trips a list of LocatedBlocks through the protobuf representation
 * and verifies every element survives the conversion.
 */
@Test public void testConvertLocatedBlockList(){
  ArrayList lbl = new ArrayList();
  for (int i = 0; i < 3; i++) {
    lbl.add(createLocatedBlock());
  }
  List lbpl = PBHelper.convertLocatedBlock2(lbl);
  List lbl2 = PBHelper.convertLocatedBlock(lbpl);
  assertEquals(lbl.size(), lbl2.size());
  for (int i = 0; i < lbl.size(); i++) {
    // BUG FIX: compare element i with element i. The original compared
    // every source element against lbl2.get(2), so conversion errors in
    // elements 0 and 1 went completely unchecked.
    compare(lbl.get(i), lbl2.get(i));
  }
}
APIUtilityVerifierEqualityVerifier
/**
 * Round-trips an array of LocatedBlocks through the protobuf
 * representation and verifies each element survives the conversion.
 */
@Test public void testConvertLocatedBlockArray(){
  final int count = 3;
  LocatedBlock[] original = new LocatedBlock[count];
  for (int i = 0; i < count; i++) {
    original[i] = createLocatedBlock();
  }
  LocatedBlockProto[] protos = PBHelper.convertLocatedBlock(original);
  LocatedBlock[] roundTripped = PBHelper.convertLocatedBlock(protos);
  assertEquals(original.length, roundTripped.length);
  for (int i = 0; i < original.length; i++) {
    compare(original[i], roundTripped[i]);
  }
}
InternalCallVerifierEqualityVerifier
/**
 * Verifies that a Text built from bytes round-trips through its String
 * form into an equal Text instance.
 */
@Test public void testConvertText(){
  final Text original = new Text("abc".getBytes());
  final Text roundTripped = new Text(original.toString());
  assertEquals(original, roundTripped);
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Test that, once the queue eclipses the configure size limit,
* calls to journal more data are rejected.
*/
@Test public void testQueueLimiting() throws Exception {
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
int numToQueue=LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
for (int i=1; i <= numToQueue; i++) {
ch.sendEdits(1L,(long)i,1,FAKE_DATA);
}
assertEquals(LIMIT_QUEUE_SIZE_BYTES,ch.getQueuedEditsSize());
try {
ch.sendEdits(1L,numToQueue + 1,1,FAKE_DATA).get(1,TimeUnit.SECONDS);
fail("Did not fail to queue more calls after queue was full");
}
catch ( ExecutionException ee) {
if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
throw ee;
}
}
delayer.proceed();
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return ch.getQueuedEditsSize() == 0;
}
}
,10,1000);
}
EqualityVerifier
/**
 * Test another edge case discovered by randomized testing.
 * Starts with the edge case state set up by{@link #setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery()}Recovery 2:
 * - New NN starts recovery and only talks to JN1 and JN2. JN0 has
 * crashed. Since they have no logs open, they say they don't need
 * recovery.
 * - Before writing any transactions, JN0 comes back to life and
 * JN1 crashes.
 * - Starts writing segment 101, and writes 50 transactions before crashing.
 * Recovery 3:
 * - JN1 has come back to life. JN2 crashes.
 * - New NN starts recovery and talks to all three. All three have
 * segments open from txid 101, so it calls prepareRecovery(101)
 * - JN0 has an already-accepted value for segment 101, so it replies
 * "you should recover 101-101"
 * - Former incorrect behavior: NN truncates logs to txid 101 even though
 * it should have recovered through 150.
 * In this case, even though there is an accepted recovery decision,
 * the newer log segments should take precedence, since they were written
 * in a newer epoch than the recorded decision.
 */
@Test public void testNewerVersionOfSegmentWins2() throws Exception {
setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();
// Recovery 2: JN0 is down; recover through txid 100 with JN1/JN2.
cluster.getJournalNode(0).stopAndJoin(0);
qjm=createSpyingQJM();
try {
assertEquals(100,QJMTestUtil.recoverAndReturnLastTxn(qjm));
// JN0 returns, JN1 crashes, then 50 txns of segment 101 are written.
cluster.restartJournalNode(0);
cluster.getJournalNode(1).stopAndJoin(0);
writeSegment(cluster,qjm,101,50,false);
}
finally {
qjm.close();
}
// Recovery 3: JN1 back, JN2 down; the newer segment (through 150) must
// win over JN0's stale accepted-recovery decision for 101-101.
cluster.restartJournalNode(1);
cluster.getJournalNode(2).stopAndJoin(0);
qjm=createSpyingQJM();
try {
assertEquals(150,QJMTestUtil.recoverAndReturnLastTxn(qjm));
}
finally {
qjm.close();
}
}
EqualityVerifier
/**
 * Test an edge case discovered by randomized testing.
 * Starts with the edge case state set up by{@link #setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery()}Recovery 2:
 * - New NN starts recovery and only talks to JN1 and JN2. JN0 has
 * crashed. Since they have no logs open, they say they don't need
 * recovery.
 * - Starts writing segment 101, and writes 50 transactions before crashing.
 * Recovery 3:
 * - JN0 has come back to life.
 * - New NN starts recovery and talks to all three. All three have
 * segments open from txid 101, so it calls prepareRecovery(101)
 * - JN0 has an already-accepted value for segment 101, so it replies
 * "you should recover 101-101"
 * - Former incorrect behavior: NN truncates logs to txid 101 even though
 * it should have recovered through 150.
 * In this case, even though there is an accepted recovery decision,
 * the newer log segments should take precedence, since they were written
 * in a newer epoch than the recorded decision.
 */
@Test public void testNewerVersionOfSegmentWins() throws Exception {
setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();
// Recovery 2: JN0 down; recover through 100, then write segment 101
// (50 txns) against the remaining two JNs.
cluster.getJournalNode(0).stopAndJoin(0);
qjm=createSpyingQJM();
try {
assertEquals(100,QJMTestUtil.recoverAndReturnLastTxn(qjm));
writeSegment(cluster,qjm,101,50,false);
}
finally {
qjm.close();
}
// Recovery 3: JN0 is back; the newer segment (through 150) must win
// over JN0's stale accepted-recovery decision for 101-101.
cluster.restartJournalNode(0);
qjm=createSpyingQJM();
try {
assertEquals(150,QJMTestUtil.recoverAndReturnLastTxn(qjm));
}
finally {
qjm.close();
}
}
APIUtilityVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
* Test the case where one of the loggers misses a finalizeLogSegment()
* call, and then misses the next startLogSegment() call before coming
* back to life.
* Previously, this caused it to keep on writing to the old log segment,
* such that one logger had eg edits_1-10 while the others had edits_1-5 and
* edits_6-10. This caused recovery to fail in certain cases.
*/
@Test public void testMissFinalizeAndNextStart() throws Exception {
futureThrows(new IOException("injected")).when(spies.get(0)).finalizeLogSegment(Mockito.eq(1L),Mockito.eq(3L));
futureThrows(new IOException("injected")).when(spies.get(0)).startLogSegment(Mockito.eq(4L),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
failLoggerAtTxn(spies.get(1),4L);
writeSegment(cluster,qjm,1,3,true);
EditLogOutputStream stm=qjm.startLogSegment(4,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
try {
writeTxns(stm,4,1);
fail("Did not fail to write");
}
catch ( QuorumException qe) {
GenericTestUtils.assertExceptionContains("Writer out of sync",qe);
}
finally {
stm.abort();
qjm.close();
}
cluster.getJournalNode(2).stopAndJoin(0);
qjm=createSpyingQJM();
long recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertEquals(3L,recovered);
}
InternalCallVerifierEqualityVerifier
/**
* Test that, if the writer crashes at the very beginning of a segment,
* before any transactions are written, that the next newEpoch() call
* returns the prior segment txid as its most recent segment.
*/
@Test(timeout=10000) public void testNewEpochAtBeginningOfSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO,1);
journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2),1,1,2,QJMTestUtil.createTxnData(1,2));
journal.finalizeLogSegment(makeRI(3),1,2);
journal.startLogSegment(makeRI(4),3,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
NewEpochResponseProto resp=journal.newEpoch(FAKE_NSINFO,2);
assertEquals(1,resp.getLastSegmentTxId());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=10000) public void testEpochHandling() throws Exception {
assertEquals(0,journal.getLastPromisedEpoch());
NewEpochResponseProto newEpoch=journal.newEpoch(FAKE_NSINFO,1);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(1,journal.getLastPromisedEpoch());
journal.newEpoch(FAKE_NSINFO,3);
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(3,journal.getLastPromisedEpoch());
try {
journal.newEpoch(FAKE_NSINFO,3);
fail("Should have failed to promise same epoch twice");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Proposed epoch 3 <= last promise 3",ioe);
}
try {
journal.startLogSegment(makeRI(1),12345L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Should have rejected call from prior epoch");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
}
try {
journal.journal(makeRI(1),12345L,100L,0,new byte[0]);
fail("Should have rejected call from prior epoch");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test that the JournalNode performs correctly as a Paxos
* Acceptor process.
*/
@Test(timeout=100000) public void testAcceptRecoveryBehavior() throws Exception {
try {
ch.prepareRecovery(1L).get();
fail("Did not throw IllegalState when trying to run paxos without an epoch");
}
catch ( ExecutionException ise) {
GenericTestUtils.assertExceptionContains("bad epoch",ise);
}
ch.newEpoch(1).get();
ch.setEpoch(1);
PrepareRecoveryResponseProto prep=ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertFalse(prep.hasSegmentState());
ch.startLogSegment(1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L,1L,1,QJMTestUtil.createTxnData(1,1)).get();
prep=ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertTrue(prep.hasSegmentState());
ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
ch.newEpoch(2);
ch.setEpoch(2);
prep=ch.prepareRecovery(1L).get();
assertEquals(1L,prep.getAcceptedInEpoch());
assertEquals(1L,prep.getSegmentState().getEndTxId());
ch.setEpoch(1);
try {
ch.prepareRecovery(1L).get();
fail("prepare from earlier epoch not rejected");
}
catch ( ExecutionException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
}
try {
ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
fail("accept from earlier epoch not rejected");
}
catch ( ExecutionException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* This test writes a file and gets the block locations without closing the
* file, and tests the block token in the last block. Block token is verified
* by ensuring it is of correct kind.
* @throws IOException
* @throws InterruptedException
*/
@Test public void testBlockTokenInLastLocatedBlock() throws IOException, InterruptedException {
Configuration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,true);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
try {
FileSystem fs=cluster.getFileSystem();
String fileName="/testBlockTokenInLastLocatedBlock";
Path filePath=new Path(fileName);
FSDataOutputStream out=fs.create(filePath,(short)1);
out.write(new byte[1000]);
LocatedBlocks locatedBlocks=cluster.getNameNodeRpc().getBlockLocations(fileName,0,1000);
while (locatedBlocks.getLastLocatedBlock() == null) {
Thread.sleep(100);
locatedBlocks=cluster.getNameNodeRpc().getBlockLocations(fileName,0,1000);
}
Token token=locatedBlocks.getLastLocatedBlock().getBlockToken();
Assert.assertEquals(BlockTokenIdentifier.KIND_NAME,token.getKind());
out.close();
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
* Test that fast repeated invocations of createClientDatanodeProtocolProxy
* will not end up using up thousands of sockets. This is a regression test
* for HDFS-1965.
*/
@Test public void testBlockTokenRpcLeak() throws Exception {
Configuration conf=new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
Assume.assumeTrue(FD_DIR.exists());
BlockTokenSecretManager sm=new BlockTokenSecretManager(blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
Token token=sm.generateToken(block3,EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
final Server server=createMockDatanode(sm,token,conf);
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
DatanodeID fakeDnId=DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b=new ExtendedBlock("fake-pool",new Block(12345L));
LocatedBlock fakeBlock=new LocatedBlock(b,new DatanodeInfo[0]);
fakeBlock.setBlockToken(token);
ClientDatanodeProtocol proxyToNoWhere=RPC.getProxy(ClientDatanodeProtocol.class,ClientDatanodeProtocol.versionID,new InetSocketAddress("1.1.1.1",1),UserGroupInformation.createRemoteUser("junk"),conf,NetUtils.getDefaultSocketFactory(conf));
ClientDatanodeProtocol proxy=null;
int fdsAtStart=countOpenFileDescriptors();
try {
long endTime=Time.now() + 3000;
while (Time.now() < endTime) {
proxy=DFSUtil.createClientDatanodeProtocolProxy(fakeDnId,conf,1000,false,fakeBlock);
assertEquals(block3.getBlockId(),proxy.getReplicaVisibleLength(block3));
if (proxy != null) {
RPC.stopProxy(proxy);
}
LOG.info("Num open fds:" + countOpenFileDescriptors());
}
int fdsAtEnd=countOpenFileDescriptors();
if (fdsAtEnd - fdsAtStart > 50) {
fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
}
}
finally {
server.stop();
}
RPC.stopProxy(proxyToNoWhere);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testBlockTokenRpc() throws Exception {
Configuration conf=new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
BlockTokenSecretManager sm=new BlockTokenSecretManager(blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
Token token=sm.generateToken(block3,EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
final Server server=createMockDatanode(sm,token,conf);
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
final UserGroupInformation ticket=UserGroupInformation.createRemoteUser(block3.toString());
ticket.addToken(token);
ClientDatanodeProtocol proxy=null;
try {
proxy=DFSUtil.createClientDatanodeProtocolProxy(addr,ticket,conf,NetUtils.getDefaultSocketFactory(conf));
assertEquals(block3.getBlockId(),proxy.getReplicaVisibleLength(block3));
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
* Test parse method in Balancer#Cli class with threshold value out of
* boundaries.
*/
@Test(timeout=100000) public void testBalancerCliParseWithThresholdOutOfBoundaries(){
String parameters[]=new String[]{"-threshold","0"};
String reason="IllegalArgumentException is expected when threshold value" + " is out of boundary.";
try {
Balancer.Cli.parse(parameters);
fail(reason);
}
catch ( IllegalArgumentException e) {
assertEquals("Number out of range: threshold = 0.0",e.getMessage());
}
parameters=new String[]{"-threshold","101"};
try {
Balancer.Cli.parse(parameters);
fail(reason);
}
catch ( IllegalArgumentException e) {
assertEquals("Number out of range: threshold = 101.0",e.getMessage());
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test(timeout=100000) public void testUnknownDatanode() throws Exception {
Configuration conf=new HdfsConfiguration();
initConf(conf);
long distribution[]=new long[]{50 * CAPACITY / 100,70 * CAPACITY / 100,0 * CAPACITY / 100};
long capacities[]=new long[]{CAPACITY,CAPACITY,CAPACITY};
String racks[]=new String[]{RACK0,RACK1,RACK1};
int numDatanodes=distribution.length;
if (capacities.length != numDatanodes || racks.length != numDatanodes) {
throw new IllegalArgumentException("Array length is not the same");
}
final long totalUsedSpace=sum(distribution);
ExtendedBlock[] blocks=generateBlocks(conf,totalUsedSpace,(short)numDatanodes);
Block[][] blocksDN=distributeBlocks(blocks,(short)(numDatanodes - 1),distribution);
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.0f");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).racks(racks).simulatedCapacities(capacities).build();
try {
cluster.waitActive();
client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy();
for (int i=0; i < 3; i++) {
cluster.injectBlocks(i,Arrays.asList(blocksDN[i]),null);
}
cluster.startDataNodes(conf,1,true,null,new String[]{RACK0},null,new long[]{CAPACITY});
cluster.triggerHeartbeats();
Collection namenodes=DFSUtil.getNsServiceRpcUris(conf);
Set datanodes=new HashSet();
datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
Balancer.Parameters p=new Balancer.Parameters(Balancer.Parameters.DEFAULT.policy,Balancer.Parameters.DEFAULT.threshold,datanodes,Balancer.Parameters.DEFAULT.nodesToBeIncluded);
final int r=Balancer.run(namenodes,p,conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(),r);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test a cluster with even distribution, then a new empty node is added to
 * the cluster. Test start a cluster with specified number of nodes, and fills
 * it to be 30% full (with a single file replicated identically to all
 * datanodes); It then adds one new empty node and starts balancing.
 * This variant runs against an HA (two-NameNode) topology and verifies the
 * balancer resolves and works through the single logical HA URI.
 */
@Test(timeout=60000) public void testBalancerWithHANameNodes() throws Exception {
Configuration conf=new HdfsConfiguration();
TestBalancer.initConf(conf);
long newNodeCapacity=TestBalancer.CAPACITY;
String newNodeRack=TestBalancer.RACK2;
String[] racks=new String[]{TestBalancer.RACK0,TestBalancer.RACK1};
long[] capacities=new long[]{TestBalancer.CAPACITY,TestBalancer.CAPACITY};
assertEquals(capacities.length,racks.length);
int numOfDatanodes=capacities.length;
NNConf nn1Conf=new MiniDFSNNTopology.NNConf("nn1");
nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
// Build the cluster from a copy so the failover settings applied to
// 'conf' below affect only the client-side configuration.
Configuration copiedConf=new Configuration(conf);
cluster=new MiniDFSCluster.Builder(copiedConf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities).build();
HATestUtil.setFailoverConfigurations(cluster,conf);
try {
cluster.waitActive();
// Make the second NameNode active; short pause lets the transition settle.
cluster.transitionToActive(1);
Thread.sleep(500);
client=NameNodeProxies.createProxy(conf,FileSystem.getDefaultUri(conf),ClientProtocol.class).getProxy();
long totalCapacity=TestBalancer.sum(capacities);
// Fill the cluster to 30%, one file replicated to every datanode.
long totalUsedSpace=totalCapacity * 3 / 10;
TestBalancer.createFile(cluster,TestBalancer.filePath,totalUsedSpace / numOfDatanodes,(short)numOfDatanodes,1);
cluster.startDataNodes(conf,1,true,null,new String[]{newNodeRack},new long[]{newNodeCapacity});
totalCapacity+=newNodeCapacity;
TestBalancer.waitForHeartBeat(totalUsedSpace,totalCapacity,client,cluster);
Collection namenodes=DFSUtil.getNsServiceRpcUris(conf);
// With HA there must be exactly one (logical) namenode URI.
assertEquals(1,namenodes.size());
assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
final int r=Balancer.run(namenodes,Balancer.Parameters.DEFAULT,conf);
assertEquals(ExitStatus.SUCCESS.getExitCode(),r);
TestBalancer.waitForBalancer(totalUsedSpace,totalCapacity,client,cluster,Balancer.Parameters.DEFAULT);
}
finally {
cluster.shutdown();
}
}
EqualityVerifierPublicFieldVerifier
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test node-group locality for balancer policy.
 */
@Test(timeout=60000) public void testBalancerWithNodeGroup() throws Exception {
Configuration conf=createConf();
// Four datanodes: two in NODEGROUP0/RACK0, one each in NODEGROUP1 and
// NODEGROUP2 on RACK1.
long[] capacities=new long[]{CAPACITY,CAPACITY,CAPACITY,CAPACITY};
String[] racks=new String[]{RACK0,RACK0,RACK1,RACK1};
String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP0,NODEGROUP1,NODEGROUP2};
int numOfDatanodes=capacities.length;
assertEquals(numOfDatanodes,racks.length);
assertEquals(numOfDatanodes,nodeGroups.length);
MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities);
// Node groups must be registered before the cluster is constructed.
MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
cluster=new MiniDFSClusterWithNodeGroup(builder);
try {
cluster.waitActive();
client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy();
long totalCapacity=TestBalancer.sum(capacities);
// Fill to 20% of total capacity, replicated to half the datanodes.
long totalUsedSpace=totalCapacity * 2 / 10;
TestBalancer.createFile(cluster,filePath,totalUsedSpace / (numOfDatanodes / 2),(short)(numOfDatanodes / 2),0);
// Add one empty node in an existing rack/node-group for the balancer.
long newCapacity=CAPACITY;
String newRack=RACK1;
String newNodeGroup=NODEGROUP2;
cluster.startDataNodes(conf,1,true,null,new String[]{newRack},new long[]{newCapacity},new String[]{newNodeGroup});
totalCapacity+=newCapacity;
runBalancer(conf,totalUsedSpace,totalCapacity);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifierPublicFieldVerifier
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy: balancing must
 * not change which blocks live on RACK0.
 */
@Test(timeout=60000) public void testBalancerWithRackLocality() throws Exception {
Configuration conf=createConf();
long[] capacities=new long[]{CAPACITY,CAPACITY};
String[] racks=new String[]{RACK0,RACK1};
String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP1};
int numOfDatanodes=capacities.length;
assertEquals(numOfDatanodes,racks.length);
MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities);
// Node groups must be registered before the cluster is constructed.
MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
cluster=new MiniDFSClusterWithNodeGroup(builder);
try {
cluster.waitActive();
client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy();
long totalCapacity=TestBalancer.sum(capacities);
long totalUsedSpace=totalCapacity * 3 / 10;
long length=totalUsedSpace / numOfDatanodes;
TestBalancer.createFile(cluster,filePath,length,(short)numOfDatanodes,0);
LocatedBlocks lbs=client.getBlockLocations(filePath.toUri().getPath(),0,length);
// Snapshot which blocks are hosted on RACK0 before balancing.
Set before=getBlocksOnRack(lbs.getLocatedBlocks(),RACK0);
long newCapacity=CAPACITY;
String newRack=RACK1;
String newNodeGroup=NODEGROUP2;
cluster.startDataNodes(conf,1,true,null,new String[]{newRack},new long[]{newCapacity},new String[]{newNodeGroup});
totalCapacity+=newCapacity;
runBalancerCanFinish(conf,totalUsedSpace,totalCapacity);
lbs=client.getBlockLocations(filePath.toUri().getPath(),0,length);
Set after=getBlocksOnRack(lbs.getLocatedBlocks(),RACK0);
// Rack locality preserved: the RACK0 block set is unchanged.
assertEquals(before,after);
}
finally {
cluster.shutdown();
}
}
EqualityVerifierPublicFieldVerifier
/**
 * Create a 4 nodes cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2)
 * in RACK1/NODEGROUP1 and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster
 * to 60% and 3 replicas, so n2 and n3 will have replica for all blocks according
 * to replica placement policy with NodeGroup. As a result, n2 and n3 will be
 * filled with 80% (60% x 4 / 3), and no blocks can be migrated from n2 and n3
 * to n0 or n1 as balancer policy with node group. Thus, we expect the balancer
 * to end in 5 iterations without move block process.
 */
@Test(timeout=60000) public void testBalancerEndInNoMoveProgress() throws Exception {
Configuration conf=createConf();
long[] capacities=new long[]{CAPACITY,CAPACITY,CAPACITY,CAPACITY};
String[] racks=new String[]{RACK0,RACK0,RACK1,RACK1};
String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP0,NODEGROUP1,NODEGROUP2};
int numOfDatanodes=capacities.length;
assertEquals(numOfDatanodes,racks.length);
assertEquals(numOfDatanodes,nodeGroups.length);
MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities);
// Node groups must be registered before the cluster is constructed.
MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
cluster=new MiniDFSClusterWithNodeGroup(builder);
try {
cluster.waitActive();
client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy();
long totalCapacity=TestBalancer.sum(capacities);
// 60% full with 3 replicas per block (see class-level comment above).
long totalUsedSpace=totalCapacity * 6 / 10;
TestBalancer.createFile(cluster,filePath,totalUsedSpace / 3,(short)(3),0);
// Only checks the balancer terminates; no block movement is expected.
runBalancerCanFinish(conf,totalUsedSpace,totalCapacity);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Adding a storage to a fresh BlockInfo must report success, and the
 * storage must then be retrievable at index 0.
 */
@Test public void testAddStorage() throws Exception {
final BlockInfo blockInfo=new BlockInfo(3);
final DatanodeStorageInfo storage=DFSTestUtil.createDatanodeStorageInfo("storageID","127.0.0.1");
final boolean wasAdded=blockInfo.addStorage(storage);
Assert.assertTrue(wasAdded);
Assert.assertEquals(storage,blockInfo.getStorageInfo(0));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Exercises DatanodeStorageInfo's intrusive block list: builds a list of
 * MAX_BLOCKS blocks, then verifies moveBlockToHead for every element, for
 * the head itself (a no-op), and for randomly chosen elements, checking
 * list length, ordering and head placement after each operation.
 * Fixes: typo in an assertion message ("shopuld"); restores the generic
 * type parameters that were missing from the collection declarations.
 */
@Test public void testBlockListMoveToHead() throws Exception {
LOG.info("BlockInfo moveToHead tests...");
final int MAX_BLOCKS=10;
DatanodeStorageInfo dd=DFSTestUtil.createDatanodeStorageInfo("s1","1.1.1.1");
ArrayList<Block> blockList=new ArrayList<Block>(MAX_BLOCKS);
ArrayList<BlockInfo> blockInfoList=new ArrayList<BlockInfo>();
int headIndex;
int curIndex;
LOG.info("Building block list...");
for (int i=0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i,0,GenerationStamp.LAST_RESERVED_STAMP));
blockInfoList.add(new BlockInfo(blockList.get(i),3));
dd.addBlock(blockInfoList.get(i));
// Each block has exactly one storage, so it must be found at index 0.
assertEquals("Find datanode should be 0",0,blockInfoList.get(i).findStorageInfo(dd));
}
LOG.info("Checking list length...");
assertEquals("Length should be MAX_BLOCK",MAX_BLOCKS,dd.numBlocks());
Iterator<BlockInfo> it=dd.getBlockIterator();
int len=0;
while (it.hasNext()) {
it.next();
len++;
}
assertEquals("There should be MAX_BLOCK blockInfo's",MAX_BLOCKS,len);
headIndex=dd.getBlockListHeadForTesting().findStorageInfo(dd);
LOG.info("Moving each block to the head of the list...");
for (int i=0; i < MAX_BLOCKS; i++) {
curIndex=blockInfoList.get(i).findStorageInfo(dd);
headIndex=dd.moveBlockToHead(blockInfoList.get(i),curIndex,headIndex);
assertEquals("Block should be at the head of the list now.",blockInfoList.get(i),dd.getBlockListHeadForTesting());
}
LOG.info("Moving head to the head...");
// Moving the current head onto itself must leave the list unchanged.
BlockInfo temp=dd.getBlockListHeadForTesting();
curIndex=0;
headIndex=0;
dd.moveBlockToHead(temp,curIndex,headIndex);
assertEquals("Moving head to the head of the list should not change the list",temp,dd.getBlockListHeadForTesting());
LOG.info("Checking elements of the list...");
temp=dd.getBlockListHeadForTesting();
assertNotNull("Head should not be null",temp);
// After moving 0..MAX_BLOCKS-1 each to the head, the list is reversed.
int c=MAX_BLOCKS - 1;
while (temp != null) {
assertEquals("Expected element is not on the list",blockInfoList.get(c--),temp);
temp=temp.getNext(0);
}
LOG.info("Moving random blocks to the head of the list...");
headIndex=dd.getBlockListHeadForTesting().findStorageInfo(dd);
Random rand=new Random();
for (int i=0; i < MAX_BLOCKS; i++) {
int j=rand.nextInt(MAX_BLOCKS);
curIndex=blockInfoList.get(j).findStorageInfo(dd);
headIndex=dd.moveBlockToHead(blockInfoList.get(j),curIndex,headIndex);
assertEquals("Block should be at the head of the list now.",blockInfoList.get(j),dd.getBlockListHeadForTesting());
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests the single cached-block list held by a DatanodeDescriptor:
 * add/addFirst/remove/clear on the "cached" list, verifying iteration
 * order after each mutation and that the sibling pendingCached and
 * pendingUncached lists stay empty throughout.
 * Fixes: the final assertion claimed to check the cached list after
 * clear() but actually re-checked getPendingCached(); it now checks
 * getCached(). Also restores the Iterator's generic type parameter.
 */
@Test(timeout=60000) public void testSingleList(){
DatanodeDescriptor dn=new DatanodeDescriptor(new DatanodeID("127.0.0.1","localhost","abcd",5000,5001,5002,5003));
CachedBlock[] blocks=new CachedBlock[]{new CachedBlock(0L,(short)1,true),new CachedBlock(1L,(short)1,true),new CachedBlock(2L,(short)1,true)};
Assert.assertTrue("expected pending cached list to start off empty.",!dn.getPendingCached().iterator().hasNext());
Assert.assertTrue("expected cached list to start off empty.",!dn.getCached().iterator().hasNext());
Assert.assertTrue("expected pending uncached list to start off empty.",!dn.getPendingUncached().iterator().hasNext());
// Adding to "cached" must not leak into the other two lists.
Assert.assertTrue(dn.getCached().add(blocks[0]));
Assert.assertTrue("expected pending cached list to still be empty.",!dn.getPendingCached().iterator().hasNext());
Assert.assertEquals("failed to insert blocks[0]",blocks[0],dn.getCached().iterator().next());
Assert.assertTrue("expected pending uncached list to still be empty.",!dn.getPendingUncached().iterator().hasNext());
// add() appends: order is [0, 1].
Assert.assertTrue(dn.getCached().add(blocks[1]));
Iterator<CachedBlock> iter=dn.getCached().iterator();
Assert.assertEquals(blocks[0],iter.next());
Assert.assertEquals(blocks[1],iter.next());
Assert.assertTrue(!iter.hasNext());
// addFirst() prepends: order is [2, 0, 1].
Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
iter=dn.getCached().iterator();
Assert.assertEquals(blocks[2],iter.next());
Assert.assertEquals(blocks[0],iter.next());
Assert.assertEquals(blocks[1],iter.next());
Assert.assertTrue(!iter.hasNext());
// Removing an interior element leaves [2, 1].
Assert.assertTrue(dn.getCached().remove(blocks[0]));
iter=dn.getCached().iterator();
Assert.assertEquals(blocks[2],iter.next());
Assert.assertEquals(blocks[1],iter.next());
Assert.assertTrue(!iter.hasNext());
dn.getCached().clear();
Assert.assertTrue("expected cached list to be empty after clear.",!dn.getCached().iterator().hasNext());
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Unit test for CorruptReplicasMap: verifies size bookkeeping as corrupt
 * replicas are added and removed, the argument-range checks of
 * getCorruptReplicaBlockIds (n must be in [0, 100]), and that block-id
 * pagination returns the expected ranges.
 */
@Test public void testCorruptReplicaInfo() throws IOException, InterruptedException {
CorruptReplicasMap crm=new CorruptReplicasMap();
assertEquals("Number of corrupt blocks must initially be 0",0,crm.size());
// Out-of-range page sizes are rejected with null.
assertNull("Param n cannot be less than 0",crm.getCorruptReplicaBlockIds(-1,null));
assertNull("Param n cannot be greater than 100",crm.getCorruptReplicaBlockIds(101,null));
long[] l=crm.getCorruptReplicaBlockIds(0,null);
assertNotNull("n = 0 must return non-null",l);
assertEquals("n = 0 must return an empty list",0,l.length);
int NUM_BLOCK_IDS=140;
List block_ids=new LinkedList();
for (int i=0; i < NUM_BLOCK_IDS; i++) {
block_ids.add((long)i);
}
DatanodeDescriptor dn1=DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeDescriptor dn2=DFSTestUtil.getLocalDatanodeDescriptor();
addToCorruptReplicasMap(crm,getBlock(0),dn1);
assertEquals("Number of corrupt blocks not returning correctly",1,crm.size());
addToCorruptReplicasMap(crm,getBlock(1),dn1);
assertEquals("Number of corrupt blocks not returning correctly",2,crm.size());
// Same block reported corrupt by a second node: size counts blocks,
// not (block, datanode) pairs, so it stays at 2.
addToCorruptReplicasMap(crm,getBlock(1),dn2);
assertEquals("Number of corrupt blocks not returning correctly",2,crm.size());
crm.removeFromCorruptReplicasMap(getBlock(1));
assertEquals("Number of corrupt blocks not returning correctly",1,crm.size());
crm.removeFromCorruptReplicasMap(getBlock(0));
assertEquals("Number of corrupt blocks not returning correctly",0,crm.size());
for ( Long block_id : block_ids) {
addToCorruptReplicasMap(crm,getBlock(block_id),dn1);
}
assertEquals("Number of corrupt blocks not returning correctly",NUM_BLOCK_IDS,crm.size());
assertTrue("First five block ids not returned correctly ",Arrays.equals(new long[]{0,1,2,3,4},crm.getCorruptReplicaBlockIds(5,null)));
LOG.info(crm.getCorruptReplicaBlockIds(10,7L));
LOG.info(block_ids.subList(7,18));
// Pagination resumes AFTER the given start id (7), so ids 8..17 return.
assertTrue("10 blocks after 7 not returned correctly ",Arrays.equals(new long[]{8,9,10,11,12,13,14,15,16,17},crm.getCorruptReplicaBlockIds(10,7L)));
}
InternalCallVerifierEqualityVerifier
/**
 * Test that getInvalidateBlocks observes the maxlimit: with MAX_BLOCKS
 * queued for invalidation, the first fetch of up to MAX_LIMIT blocks is
 * capped at MAX_LIMIT, and the second fetch drains the remainder.
 * Fixes: assertEquals arguments were in (actual, expected) order, which
 * produces misleading failure messages; restores the generic type
 * parameter on the block list.
 */
@Test public void testGetInvalidateBlocks() throws Exception {
final int MAX_BLOCKS=10;
final int REMAINING_BLOCKS=2;
final int MAX_LIMIT=MAX_BLOCKS - REMAINING_BLOCKS;
DatanodeDescriptor dd=DFSTestUtil.getLocalDatanodeDescriptor();
ArrayList<Block> blockList=new ArrayList<Block>(MAX_BLOCKS);
for (int i=0; i < MAX_BLOCKS; i++) {
blockList.add(new Block(i,0,GenerationStamp.LAST_RESERVED_STAMP));
}
dd.addBlocksToBeInvalidated(blockList);
// First fetch is capped at MAX_LIMIT even though more blocks are queued.
Block[] bc=dd.getInvalidateBlocks(MAX_LIMIT);
assertEquals(MAX_LIMIT,bc.length);
// Second fetch returns only the remaining blocks.
bc=dd.getInvalidateBlocks(MAX_LIMIT);
assertEquals(REMAINING_BLOCKS,bc.length);
}
IterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * This test sends a random sequence of node registrations and node removals
 * to the DatanodeManager (of nodes with different IDs and versions), and
 * checks that the DatanodeManager keeps a correct count of different software
 * versions at all times.
 */
@Test public void testNumVersionsReportedCorrect() throws IOException {
FSNamesystem fsn=Mockito.mock(FSNamesystem.class);
Mockito.when(fsn.hasWriteLock()).thenReturn(true);
DatanodeManager dm=new DatanodeManager(Mockito.mock(BlockManager.class),fsn,new Configuration());
// Log the seed so a failing run can be reproduced deterministically.
Random rng=new Random();
int seed=rng.nextInt();
rng=new Random(seed);
LOG.info("Using seed " + seed + " for testing");
HashMap sIdToDnReg=new HashMap();
for (int i=0; i < NUM_ITERATIONS; ++i) {
// Roughly every third iteration (when nodes exist), remove one node.
if (rng.nextBoolean() && i % 3 == 0 && sIdToDnReg.size() != 0) {
// NOTE(review): rng.nextInt() may be negative, making randomIndex
// negative; the loop below then does nothing and the first map
// entry is removed. Works, but the selection is biased -- confirm
// this is intentional.
int randomIndex=rng.nextInt() % sIdToDnReg.size();
Iterator> it=sIdToDnReg.entrySet().iterator();
for (int j=0; j < randomIndex - 1; ++j) {
it.next();
}
DatanodeRegistration toRemove=it.next().getValue();
LOG.info("Removing node " + toRemove.getDatanodeUuid() + " ip "+ toRemove.getXferAddr()+ " version : "+ toRemove.getSoftwareVersion());
dm.removeDatanode(toRemove);
it.remove();
}
else {
// Register a node; re-registering an existing storageID may also
// simulate an IP change.
String storageID="someStorageID" + rng.nextInt(5000);
DatanodeRegistration dr=Mockito.mock(DatanodeRegistration.class);
Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
if (sIdToDnReg.containsKey(storageID)) {
dr=sIdToDnReg.get(storageID);
if (rng.nextBoolean()) {
dr.setIpAddr(dr.getIpAddr() + "newIP");
}
}
else {
String ip="someIP" + storageID;
Mockito.when(dr.getIpAddr()).thenReturn(ip);
Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
Mockito.when(dr.getXferPort()).thenReturn(9000);
}
Mockito.when(dr.getSoftwareVersion()).thenReturn("version" + rng.nextInt(5));
LOG.info("Registering node storageID: " + dr.getDatanodeUuid() + ", version: "+ dr.getSoftwareVersion()+ ", IP address: "+ dr.getXferAddr());
dm.registerDatanode(dr);
sIdToDnReg.put(storageID,dr);
}
// Cross-check: decrement the DatanodeManager's per-version counts by
// our own bookkeeping; everything must cancel out exactly.
Map mapToCheck=dm.getDatanodesSoftwareVersions();
for ( Entry it : sIdToDnReg.entrySet()) {
String ver=it.getValue().getSoftwareVersion();
if (!mapToCheck.containsKey(ver)) {
throw new AssertionError("The correct number of datanodes of a " + "version was not found on iteration " + i);
}
mapToCheck.put(ver,mapToCheck.get(ver) - 1);
if (mapToCheck.get(ver) == 0) {
mapToCheck.remove(ver);
}
}
for ( Entry entry : mapToCheck.entrySet()) {
LOG.info("Still in map: " + entry.getKey() + " has "+ entry.getValue());
}
assertEquals("The map of version counts returned by DatanodeManager was" + " not what it was expected to be on iteration " + i,0,mapToCheck.size());
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test processOverReplicatedBlock can handle corrupt replicas fine.
 * It make sure that it won't treat corrupt replicas as valid ones
 * thus prevents NN deleting valid replicas but keeping
 * corrupt ones.
 */
@Test public void testProcesOverReplicateBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
// Corrupt the replica on DN 0, then restart that DN after deleting the
// block scanner's verification log so the corruption gets re-detected.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0));
DataNodeProperties dnProps=cluster.stopDataNode(0);
File scanLog=new File(MiniDFSCluster.getFinalizedDir(cluster.getInstanceStorageDir(0,0),cluster.getNamesystem().getBlockPoolId()).getParent().toString() + "/../dncp_block_verification.log.prev");
// The log may be held open briefly; retry deletion for up to a minute.
for (int i=0; !scanLog.delete(); i++) {
assertTrue("Could not delete log file in one minute",i < 60);
try {
Thread.sleep(1000);
}
catch ( InterruptedException ignored) {
}
}
cluster.restartDataNode(dnProps);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
String blockPoolId=cluster.getNamesystem().getBlockPoolId();
final DatanodeID corruptDataNode=DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2),blockPoolId);
final FSNamesystem namesystem=cluster.getNamesystem();
final BlockManager bm=namesystem.getBlockManager();
final HeartbeatManager hm=bm.getDatanodeManager().getHeartbeatManager();
try {
namesystem.writeLock();
synchronized (hm) {
// Mark every node EXCEPT the corrupt one as full, so the excess
// replica can only be removed from a non-corrupt node.
String corruptMachineName=corruptDataNode.getXferAddr();
for ( DatanodeDescriptor datanode : hm.getDatanodes()) {
if (!corruptMachineName.equals(datanode.getXferAddr())) {
datanode.getStorageInfos()[0].setUtilizationForTesting(100L,100L,0,100L);
datanode.updateHeartbeat(BlockManagerTestUtil.getStorageReportsForDatanode(datanode),0L,0L,0,0);
}
}
// Drop replication to 1 and verify the surviving replica is live
// (i.e. the corrupt replica was not retained in place of a valid one).
NameNodeAdapter.setReplication(namesystem,fileName.toString(),(short)1);
assertEquals(1,bm.countNodes(block.getLocalBlock()).liveReplicas());
}
}
finally {
namesystem.writeUnlock();
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test over replicated block should get invalidated when decreasing the
 * replication for a partial block.
 */
@Test public void testInvalidateOverReplicatedBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
final FSNamesystem namesystem=cluster.getNamesystem();
final BlockManager bm=namesystem.getBlockManager();
FileSystem fs=cluster.getFileSystem();
Path p=new Path(MiniDFSCluster.getBaseDirectory(),"/foo1");
// Write with replication 2, then lower replication to 1 while the last
// block is still open (partial) -- hsync() before close keeps it partial.
FSDataOutputStream out=fs.create(p,(short)2);
out.writeBytes("HDFS-3119: " + p);
out.hsync();
fs.setReplication(p,(short)1);
out.close();
// The excess replica must have been invalidated, leaving exactly one.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,p);
assertEquals("Expected only one live replica for the block",1,bm.countNodes(block.getLocalBlock()).liveReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The test verifies that replica for deletion is chosen on a node,
 * with the oldest heartbeat, when this heartbeat is larger than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats.
 */
@Test public void testChooseReplicaToDelete() throws Exception {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,SMALL_BLOCK_SIZE);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Start a 4th DN whose heartbeat interval is very long (300s), so its
// heartbeat will look stale to the NameNode.
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,300);
cluster.startDataNodes(conf,1,true,null,null,null);
DataNode lastDN=cluster.getDataNodes().get(3);
DatanodeRegistration dnReg=DataNodeTestUtils.getDNRegistrationForBP(lastDN,namesystem.getBlockPoolId());
String lastDNid=dnReg.getDatanodeUuid();
final Path fileName=new Path("/foo2");
DFSTestUtil.createFile(fs,fileName,SMALL_FILE_LENGTH,(short)4,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)4);
DatanodeDescriptor nodeInfo=null;
long lastHeartbeat=0;
long waitTime=DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 * (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
// Busy-wait until the last DN's heartbeat is older than the tolerable
// interval, so it becomes the deletion candidate.
do {
nodeInfo=namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg);
lastHeartbeat=nodeInfo.getLastUpdate();
}
 while (now() - lastHeartbeat < waitTime);
fs.setReplication(fileName,(short)3);
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(fileName),0,Long.MAX_VALUE);
// All excess replicas must be scheduled for deletion on the stale DN.
namesystem.readLock();
Collection dnBlocks=namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
assertEquals("Replicas on node " + lastDNid + " should have been deleted",SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE,dnBlocks.size());
namesystem.readUnlock();
// Nothing is actually deleted yet (the stale DN never heartbeats), so
// every block still reports 4 replicas.
for ( BlockLocation location : locs) assertEquals("Block should still have 4 replicas",4,location.getNames().length);
}
finally {
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifierEqualityVerifier
/**
 * Test whether we can delay the deletion of unknown blocks in DataNode's
 * first several block reports.
 */
@Test public void testPendingDeleteUnknownBlocks() throws Exception {
final int fileNum=5;
final Path[] files=new Path[fileNum];
final DataNodeProperties[] dnprops=new DataNodeProperties[REPLICATION];
for (int i=0; i < fileNum; i++) {
files[i]=new Path("/file" + i);
DFSTestUtil.createFile(dfs,files[i],BLOCKSIZE,REPLICATION,i);
}
waitForReplication();
// Stop every datanode, then delete two files while the DNs are down,
// so the DNs still hold replicas of blocks the NN no longer knows.
for (int i=REPLICATION - 1; i >= 0; i--) {
dnprops[i]=cluster.stopDataNode(i);
}
Thread.sleep(2000);
for (int i=0; i < 2; i++) {
dfs.delete(files[i],true);
}
cluster.restartNameNode(false);
// Replace the BlockManager's InvalidateBlocks with a spy whose
// invalidation delay is 1ms, so deletions stay pending but measurable.
InvalidateBlocks invalidateBlocks=(InvalidateBlocks)Whitebox.getInternalState(cluster.getNamesystem().getBlockManager(),"invalidateBlocks");
InvalidateBlocks mockIb=Mockito.spy(invalidateBlocks);
Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(),"invalidateBlocks",mockIb);
Assert.assertEquals(0L,cluster.getNamesystem().getPendingDeletionBlocks());
for (int i=0; i < REPLICATION; i++) {
cluster.restartDataNode(dnprops[i],true);
}
cluster.waitActive();
for (int i=0; i < REPLICATION; i++) {
DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
}
Thread.sleep(2000);
// 3 files remain; the 2 deleted files' blocks (x2 replicas) are pending.
Assert.assertEquals(3,cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(4,cluster.getNamesystem().getPendingDeletionBlocks());
// After a NN restart the pending deletions are eventually processed.
cluster.restartNameNode(true);
Thread.sleep(6000);
Assert.assertEquals(3,cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(0,cluster.getNamesystem().getPendingDeletionBlocks());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Test when a block's replica is removed from RBW folder in one of the
 * datanode, namenode should ask to invalidate that corrupted block and
 * schedule replication for one more replica for that under replicated block.
 */
@Test(timeout=600000) public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException {
// File deletion below relies on POSIX semantics; skip on Windows.
assumeTrue(!Path.WINDOWS);
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,2);
// Aggressive report/scan/heartbeat intervals keep the test fast.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FSDataOutputStream out=null;
try {
final FSNamesystem namesystem=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
Path testPath=new Path("/tmp/TestRBWBlockInvalidation","foo1");
// Keep the file open after hsync so its block stays in the RBW state.
out=fs.create(testPath,(short)2);
out.writeBytes("HDFS-3157: " + testPath);
out.hsync();
cluster.startDataNodes(conf,1,true,null,null,null);
String bpid=namesystem.getBlockPoolId();
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,testPath);
Block block=blk.getLocalBlock();
DataNode dn=cluster.getDataNodes().get(0);
// Delete both the block file and its meta file from DN 0's RBW folder.
File blockFile=DataNodeTestUtils.getBlockFile(dn,bpid,block);
File metaFile=DataNodeTestUtils.getMetaFile(dn,bpid,block);
assertTrue("Could not delete the block file from the RBW folder",blockFile.delete());
assertTrue("Could not delete the block meta file from the RBW folder",metaFile.delete());
out.close();
// Phase 1: wait for the NN to notice the corruption (live replicas < 2).
int liveReplicas=0;
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) < 2) {
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be less than 2 replicas in the " + "liveReplicasMap",1,liveReplicas);
// Phase 2: wait for re-replication back up to 2 live replicas.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) > 1) {
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be two live replicas",2,liveReplicas);
// Phase 3: wait for the corrupt replica to be invalidated.
while (true) {
Thread.sleep(100);
if (countReplicas(namesystem,blk).corruptReplicas() == 0) {
LOG.info("Corrupt Replicas becomes 0");
break;
}
}
}
finally {
if (out != null) {
out.close();
}
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Regression test for HDFS-4799, a case where, upon restart, if there
 * were RWR replicas with out-of-date genstamps, the NN could accidentally
 * delete good replicas instead of the bad replicas.
 */
@Test(timeout=60000) public void testRWRInvalidation() throws Exception {
Configuration conf=new HdfsConfiguration();
// RandomDeleterPolicy makes the bad deletion likely to surface if the
// NN picks invalidation targets incorrectly.
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,RandomDeleterPolicy.class,BlockPlacementPolicy.class);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
List testPaths=Lists.newArrayList();
for (int i=0; i < 10; i++) {
testPaths.add(new Path("/test" + i));
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
List streams=Lists.newArrayList();
try {
// Write "old gs data" to both DNs, then stop DN 0 so it keeps
// replicas with the old generation stamp.
for ( Path path : testPaths) {
FSDataOutputStream out=cluster.getFileSystem().create(path,(short)2);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
}
DataNodeProperties oldGenstampNode=cluster.stopDataNode(0);
// Continue writing; the remaining DN gets data with a newer genstamp.
for (int i=0; i < streams.size(); i++) {
Path path=testPaths.get(i);
FSDataOutputStream out=streams.get(i);
out.writeBytes("new gs data\n");
out.hflush();
cluster.getFileSystem().setReplication(path,(short)1);
out.close();
}
LOG.info("=========================== restarting cluster");
// Restart everything with the stale-genstamp node coming back FIRST,
// reproducing the HDFS-4799 ordering.
DataNodeProperties otherNode=cluster.stopDataNode(0);
cluster.restartNameNode();
cluster.restartDataNode(oldGenstampNode);
cluster.waitActive();
cluster.restartDataNode(otherNode);
cluster.waitActive();
cluster.getNameNode().getNamesystem().getBlockManager().computeInvalidateWork(2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// The good (new-genstamp) replicas must have survived invalidation.
for ( Path path : testPaths) {
String ret=DFSTestUtil.readFile(cluster.getFileSystem(),path);
assertEquals("old gs data\n" + "new gs data\n",ret);
}
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, the client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica.
 * Fixes: assertEquals arguments were in (actual, expected) order, which
 * produces misleading failure messages.
 * @throws Exception
 */
@Test public void testChooseTarget5() throws Exception {
DatanodeDescriptor writerDesc=DFSTestUtil.getDatanodeDescriptor("7.7.7.7","/d2/r4");
DatanodeStorageInfo[] targets;
// Requesting zero replicas yields an empty target list.
targets=chooseTarget(0,writerDesc);
assertEquals(0,targets.length);
targets=chooseTarget(1,writerDesc);
assertEquals(1,targets.length);
targets=chooseTarget(2,writerDesc);
assertEquals(2,targets.length);
// The first two replicas must land on different racks.
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3,writerDesc);
assertEquals(3,targets.length);
// The 3rd replica shares a rack with the 2nd, but not with the 1st.
assertTrue(isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameRack(targets[0],targets[1]));
}
InternalCallVerifierEqualityVerifier
/**
* Test for the chooseReplicaToDelete are processed based on
* block locality and free space
*/
@Test public void testChooseReplicaToDelete() throws Exception {
List replicaList=new ArrayList();
final Map> rackMap=new HashMap>();
dataNodes[0].setRemaining(4 * 1024 * 1024);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3 * 1024 * 1024);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2 * 1024 * 1024);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1 * 1024 * 1024);
replicaList.add(storages[5]);
for (int i=0; i < dataNodes.length; i++) {
dataNodes[i].setLastUpdate(Time.now());
}
List first=new ArrayList();
List second=new ArrayList();
replicator.splitNodesWithRack(replicaList,rackMap,first,second);
assertEquals(2,first.size());
assertEquals(2,second.size());
DatanodeStorageInfo chosen=replicator.chooseReplicaToDelete(null,null,(short)3,first,second);
assertEquals(chosen,storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen);
assertEquals(0,first.size());
assertEquals(3,second.size());
chosen=replicator.chooseReplicaToDelete(null,null,(short)2,first,second);
assertEquals(chosen,storages[5]);
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test public void testChooseTarget3() throws Exception {
// Report dataNodes[0] as nearly full so the policy must skip it.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// The writer's node is skipped; dataNodes[1] takes its place.
assertEquals(storages[1],targets[0]);
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertEquals(storages[1],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3);
assertEquals(targets.length,3);
assertEquals(storages[1],targets[0]);
assertTrue(isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(4);
assertEquals(targets.length,4);
assertEquals(storages[1],targets[0]);
// The remaining replicas must all leave the first target's rack.
for (int i=1; i < 4; i++) {
assertFalse(isOnSameRack(targets[0],targets[i]));
}
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[1],targets[3]));
// Restore dataNodes[0]'s reported free space for subsequent tests.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
 * and when the number of replicas is less or equal to 3, all the healthy
 * datanodes should be returned by the chooseTarget method. When the number
 * of replicas is 4, a stale node should be included.
 * @throws Exception
 */
@Test public void testChooseTargetWithHalfStaleNodes() throws Exception {
// Age the first three nodes' heartbeats beyond staleInterval.
for (int i=0; i < 3; i++) {
dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
}
// Re-run the heartbeat check so the manager registers the stale nodes.
namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
DatanodeStorageInfo[] targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// While healthy nodes suffice, stale nodes 0..2 must not be chosen.
assertFalse(containsWithinRange(targets[0],dataNodes,0,2));
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertFalse(containsWithinRange(targets[0],dataNodes,0,2));
assertFalse(containsWithinRange(targets[1],dataNodes,0,2));
targets=chooseTarget(3);
assertEquals(targets.length,3);
assertTrue(containsWithinRange(targets[0],dataNodes,3,5));
assertTrue(containsWithinRange(targets[1],dataNodes,3,5));
assertTrue(containsWithinRange(targets[2],dataNodes,3,5));
targets=chooseTarget(4);
assertEquals(targets.length,4);
// Four replicas exceed the three healthy nodes, so all healthy nodes
// must appear among the targets (and hence one stale node as well).
assertTrue(containsWithinRange(dataNodes[3],targets,0,3));
assertTrue(containsWithinRange(dataNodes[4],targets,0,3));
assertTrue(containsWithinRange(dataNodes[5],targets,0,3));
// Restore fresh heartbeats so later tests see no stale nodes.
for (int i=0; i < dataNodes.length; i++) {
dataNodes[i].setLastUpdate(Time.now());
}
namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica,
 * @throws Exception
 */
// NOTE(review): method name contains a typo ("Chooose"); kept as-is to
// avoid changing the public test name.
@Test public void testChoooseTarget4() throws Exception {
// Report both rack-1 nodes as nearly full so neither qualifies.
for (int i=0; i < 2; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
}
DatanodeStorageInfo[] targets;
targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// The writer's rack is disqualified, so the target must be elsewhere.
assertFalse(isOnSameRack(targets[0],dataNodes[0]));
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertFalse(isOnSameRack(targets[0],dataNodes[0]));
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3);
assertEquals(targets.length,3);
for (int i=0; i < 3; i++) {
assertFalse(isOnSameRack(targets[i],dataNodes[0]));
}
assertTrue(isOnSameRack(targets[0],targets[1]) || isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameRack(targets[0],targets[2]));
// Restore the rack-1 nodes' reported free space.
for (int i=0; i < 2; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that DFSUtil.getReplWorkMultiplier() returns a positive default,
 * honors a configured positive value, and rejects a non-positive value
 * with an IllegalArgumentException.
 */
@Test public void testGetReplWorkMultiplier(){
Configuration conf=new Configuration();
// The default must be positive.
int blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
assertTrue(blocksReplWorkMultiplier > 0);
// A configured positive value is returned verbatim.  (Fixed: assertEquals
// takes the expected value first; the arguments were reversed.)
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
assertEquals(3,blocksReplWorkMultiplier);
// A non-positive value must be rejected.
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
exception.expect(IllegalArgumentException.class);
// Result intentionally unused; the call is expected to throw.
DFSUtil.getReplWorkMultiplier(conf);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Re-replication when dataNodes[0] already holds a replica: the first new
 * replica may go to any rack, the second must share the existing replica's
 * rack (on a different node), and the third is unconstrained.
 * @throws Exception
 */
@Test public void testRereplicate1() throws Exception {
// The existing replica sits on dataNodes[0].
List existing=new ArrayList();
existing.add(storages[0]);

DatanodeStorageInfo[] picked=chooseTarget(0,existing);
assertEquals(picked.length,0);

picked=chooseTarget(1,existing);
assertEquals(picked.length,1);
// A single new replica leaves the existing replica's rack.
assertFalse(isOnSameRack(picked[0],dataNodes[0]));

picked=chooseTarget(2,existing);
assertEquals(picked.length,2);
// Two new replicas: one on the existing rack, one elsewhere.
assertTrue(isOnSameRack(picked[0],dataNodes[0]));
assertFalse(isOnSameRack(picked[0],picked[1]));

picked=chooseTarget(3,existing);
assertEquals(picked.length,3);
assertTrue(isOnSameRack(picked[0],dataNodes[0]));
assertFalse(isOnSameRack(picked[0],picked[2]));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node
 * of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test public void testChooseTarget1() throws Exception {
// Report dataNodes[0] with ample space but a non-zero load value (the 4
// below — presumably the xceiver count; confirm against the helper's
// signature). It should still be chosen as the first target.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,4,0);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// The writer's own storage is preferred for the first replica.
assertEquals(storages[0],targets[0]);
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3);
assertEquals(targets.length,3);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
assertTrue(isOnSameRack(targets[1],targets[2]));
targets=chooseTarget(4);
assertEquals(targets.length,4);
assertEquals(storages[0],targets[0]);
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[0],targets[2]));
// Reset dataNodes[0]'s reported load for subsequent tests.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
Set excludedNodes;
DatanodeStorageInfo[] targets;
List chosenNodes=new ArrayList();
// Each scenario below starts from a fresh chosen/excluded state, with
// dataNodes[1] excluded every time.
excludedNodes=new HashSet();
excludedNodes.add(dataNodes[1]);
targets=chooseTarget(0,chosenNodes,excludedNodes);
assertEquals(targets.length,0);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets=chooseTarget(1,chosenNodes,excludedNodes);
assertEquals(targets.length,1);
// The writer's own node remains eligible and is preferred.
assertEquals(storages[0],targets[0]);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets=chooseTarget(2,chosenNodes,excludedNodes);
assertEquals(targets.length,2);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets=chooseTarget(3,chosenNodes,excludedNodes);
assertEquals(targets.length,3);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
assertTrue(isOnSameRack(targets[1],targets[2]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
targets=chooseTarget(4,chosenNodes,excludedNodes);
assertEquals(targets.length,4);
assertEquals(storages[0],targets[0]);
for (int i=1; i < 4; i++) {
assertFalse(isOnSameRack(targets[0],targets[i]));
}
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[1],targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
// With returnChosenNodes=true the result contains the already-chosen
// storages[2] in addition to the newly selected target.
targets=replicator.chooseTarget(filename,1,dataNodes[0],chosenNodes,true,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2,targets.length);
// Scan for storages[2] in the returned array; it must be present.
int i=0;
for (; i < targets.length && !storages[2].equals(targets[i]); i++) ;
assertTrue(i < targets.length);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[2] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate3() throws Exception {
// Existing replicas sit on dataNodes[0] and dataNodes[2].
List chosenNodes=new ArrayList();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[2]);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0,chosenNodes);
assertEquals(targets.length,0);
// Default writer: the new replica lands on the writer's rack (node 0's).
targets=chooseTarget(1,chosenNodes);
assertEquals(targets.length,1);
assertTrue(isOnSameRack(targets[0],dataNodes[0]));
assertFalse(isOnSameRack(targets[0],dataNodes[2]));
// With dataNodes[2] as writer, the new replica moves to node 2's rack.
targets=chooseTarget(1,dataNodes[2],chosenNodes);
assertEquals(targets.length,1);
assertTrue(isOnSameRack(targets[0],dataNodes[2]));
assertFalse(isOnSameRack(targets[0],dataNodes[0]));
targets=chooseTarget(2,chosenNodes);
assertEquals(targets.length,2);
assertTrue(isOnSameRack(targets[0],dataNodes[0]));
targets=chooseTarget(2,dataNodes[2],chosenNodes);
assertEquals(targets.length,2);
assertTrue(isOnSameRack(targets[0],dataNodes[2]));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
// Report two nodes as nearly full so only NUM_OF_DATANODES - 2 qualify.
for (int i=0; i < 2; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
}
// Attach a capturing appender so the shortfall warning can be asserted.
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
DatanodeStorageInfo[] targets=chooseTarget(NUM_OF_DATANODES);
// Only the qualifying nodes can be returned.
assertEquals(targets.length,NUM_OF_DATANODES - 2);
final List log=appender.getLog();
assertNotNull(log);
assertFalse(log.size() == 0);
// The last log entry must be at least WARN and name the 2-node shortfall.
final LoggingEvent lastLogEntry=log.get(log.size() - 1);
assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
// Restore the two nodes' reported free space.
for (int i=0; i < 2; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive, that configured
 * values are honored, and that an IllegalArgumentException is thrown
 * when 0.0f is configured.
 */
@Test public void testGetInvalidateWorkPctPerIteration(){
Configuration conf=new Configuration();
// The default must be positive.
float blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertTrue(blocksInvalidateWorkPct > 0);
// Fixed: assertEquals takes the expected value first, and the float delta
// is now a fixed tolerance instead of one derived from the actual value
// under test (a tolerance of actual*1e-7 collapses to 0 when actual is 0).
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"0.5f");
blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertEquals(0.5f,blocksInvalidateWorkPct,1e-7f);
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"1.0f");
blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertEquals(1.0f,blocksInvalidateWorkPct,1e-7f);
// 0.0f is out of range and must be rejected.
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"0.0f");
exception.expect(IllegalArgumentException.class);
// Result intentionally unused; the call is expected to throw.
DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Re-replication when both rack-1 nodes (dataNodes[0] and dataNodes[1])
 * already hold replicas: the first new replica must leave rack 1; further
 * replicas are unconstrained.
 * @throws Exception
 */
@Test public void testRereplicate2() throws Exception {
// Existing replicas occupy both nodes of rack 1.
List existing=new ArrayList();
existing.add(storages[0]);
existing.add(storages[1]);

DatanodeStorageInfo[] picked=chooseTarget(0,existing);
assertEquals(picked.length,0);

picked=chooseTarget(1,existing);
assertEquals(picked.length,1);
// The single new replica must be off rack 1.
assertFalse(isOnSameRack(picked[0],dataNodes[0]));

picked=chooseTarget(2,existing);
assertEquals(picked.length,2);
// Both new replicas must be off rack 1.
assertFalse(isOnSameRack(picked[0],dataNodes[0]));
assertFalse(isOnSameRack(picked[1],dataNodes[0]));
}
APIUtilityVerifierIterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Verifies that nodes declared as dependencies of a chosen node are kept
// out of the selected targets and end up in the excludedNodes set.
@Test public void testChooseTargetWithDependencies() throws Exception {
// Remove the regular test nodes from the topology.
for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.remove(dataNodes[i]);
}
// Also remove any leftover "more targets" nodes.
for (int i=0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
DatanodeDescriptor node=dataNodesInMoreTargetsCase[i];
if (cluster.contains(node)) {
cluster.remove(node);
}
}
Host2NodesMap host2DatanodeMap=namenode.getNamesystem().getBlockManager().getDatanodeManager().getHost2DatanodeMap();
// Register the dependency-case nodes with both the topology and the
// host-to-datanode map (dependency lookups go through the latter).
for (int i=0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
cluster.add(dataNodesForDependencies[i]);
host2DatanodeMap.add(dataNodesForDependencies[i]);
}
// Nodes 1<->2 and 3<->4 are mutually dependent.
dataNodesForDependencies[1].addDependentHostName(dataNodesForDependencies[2].getHostName());
dataNodesForDependencies[2].addDependentHostName(dataNodesForDependencies[1].getHostName());
dataNodesForDependencies[3].addDependentHostName(dataNodesForDependencies[4].getHostName());
dataNodesForDependencies[4].addDependentHostName(dataNodesForDependencies[3].getHostName());
// Give every dependency-case node plenty of free space.
for (int i=0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
updateHeartbeatWithUsage(dataNodesForDependencies[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
List chosenNodes=new ArrayList();
DatanodeStorageInfo[] targets;
Set excludedNodes=new HashSet();
excludedNodes.add(dataNodesForDependencies[5]);
// Only 2 of the requested 3 targets can be found: the writer's own node
// plus one of the 3/4 pair; node 2 is barred as node 1's dependency and
// node 5 is explicitly excluded.
targets=chooseTarget(3,dataNodesForDependencies[1],chosenNodes,excludedNodes);
assertEquals(targets.length,2);
assertEquals(targets[0],storagesForDependencies[1]);
assertTrue(targets[1].equals(storagesForDependencies[3]) || targets[1].equals(storagesForDependencies[4]));
// Selection must have folded every dependency node into excludedNodes.
assertEquals(excludedNodes.size(),NUM_OF_DATANODES_FOR_DEPENDENCIES);
for (int i=0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[3] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate3() throws Exception {
setupDataNodeCapacity();
// Existing replicas sit on dataNodes[0] and dataNodes[3].
List chosenNodes=new ArrayList();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[3]);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0,chosenNodes);
assertEquals(targets.length,0);
// Default writer: the new replica lands on node 0's rack.
targets=chooseTarget(1,chosenNodes);
assertEquals(targets.length,1);
assertTrue(isOnSameRack(dataNodes[0],targets[0]));
assertFalse(isOnSameRack(dataNodes[3],targets[0]));
// With dataNodes[3] as writer: same rack as node 3, but a different
// node group within that rack.
targets=chooseTarget(1,dataNodes[3],chosenNodes);
assertEquals(targets.length,1);
assertTrue(isOnSameRack(dataNodes[3],targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[3],targets[0]));
assertFalse(isOnSameRack(dataNodes[0],targets[0]));
targets=chooseTarget(2,chosenNodes);
assertEquals(targets.length,2);
assertTrue(isOnSameRack(dataNodes[0],targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[0],targets[0]));
targets=chooseTarget(2,dataNodes[3],chosenNodes);
assertEquals(targets.length,2);
assertTrue(isOnSameRack(dataNodes[3],targets[0]));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack of rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test public void testRereplicate2() throws Exception {
setupDataNodeCapacity();
// Existing replicas occupy both rack-1 nodes.
List chosenNodes=new ArrayList();
chosenNodes.add(storages[0]);
chosenNodes.add(storages[1]);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0,chosenNodes);
assertEquals(targets.length,0);
targets=chooseTarget(1,chosenNodes);
assertEquals(targets.length,1);
// The single new replica must leave rack 1.
assertFalse(isOnSameRack(dataNodes[0],targets[0]));
targets=chooseTarget(2,chosenNodes);
assertEquals(targets.length,2);
// At least one of the two new replicas must be off rack 1.
assertFalse(isOnSameRack(dataNodes[0],targets[0]) && isOnSameRack(dataNodes[0],targets[1]));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node and nodegroup by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test public void testRereplicate1() throws Exception {
setupDataNodeCapacity();
// The existing replica sits on dataNodes[0].
List chosenNodes=new ArrayList();
chosenNodes.add(storages[0]);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0,chosenNodes);
assertEquals(targets.length,0);
targets=chooseTarget(1,chosenNodes);
assertEquals(targets.length,1);
assertFalse(isOnSameRack(dataNodes[0],targets[0]));
targets=chooseTarget(2,chosenNodes);
assertEquals(targets.length,2);
assertTrue(isOnSameRack(dataNodes[0],targets[0]));
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3,chosenNodes);
assertEquals(targets.length,3);
// Same rack as the existing replica, but a different node group.
assertTrue(isOnSameRack(dataNodes[0],targets[0]));
assertFalse(isOnSameNodeGroup(dataNodes[0],targets[0]));
assertFalse(isOnSameRack(targets[0],targets[2]));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test replica placement policy in case of boundary topology.
 * Rack 2 has only 1 node group & can't be placed with two replicas
 * The 1st replica will be placed on writer.
 * The 2nd replica should be placed on a different rack
 * The 3rd replica should be placed on the same rack with writer, but on a
 * different node group.
 */
@Test public void testChooseTargetsOnBoundaryTopology() throws Exception {
// Swap the regular nodes for the boundary-case topology.
for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.remove(dataNodes[i]);
}
for (int i=0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
cluster.add(dataNodesInBoundaryCase[i]);
}
// Hoisted out of the loop below: the previous revision re-issued this
// identical (loop-invariant) update of dataNodes[0] on every iteration;
// applying it once produces the same final state.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
// Give every boundary-case node plenty of free space.
for (int i=0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
DatanodeStorageInfo[] targets;
targets=chooseTarget(0,dataNodesInBoundaryCase[0]);
assertEquals(targets.length,0);
targets=chooseTarget(1,dataNodesInBoundaryCase[0]);
assertEquals(targets.length,1);
targets=chooseTarget(2,dataNodesInBoundaryCase[0]);
assertEquals(targets.length,2);
// The first two replicas must be on different racks.
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3,dataNodesInBoundaryCase[0]);
assertEquals(targets.length,3);
// All three replicas must land in distinct node groups.
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, the client is a node outside of the file system.
 * So the 1st replica can be placed on any node.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * @throws Exception
 */
@Test public void testChooseTarget5() throws Exception {
setupDataNodeCapacity();
DatanodeStorageInfo[] targets;
// NODE is the external (non-datanode) writer used by these calls.
targets=chooseTarget(0,NODE);
assertEquals(targets.length,0);
targets=chooseTarget(1,NODE);
assertEquals(targets.length,1);
targets=chooseTarget(2,NODE);
assertEquals(targets.length,2);
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3,NODE);
assertEquals(targets.length,3);
assertTrue(isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameRack(targets[0],targets[1]));
// Node-group-aware policy: additionally, no two targets may share a
// node group.
verifyNoTwoTargetsOnSameNodeGroup(targets);
}
InternalCallVerifierEqualityVerifier
/**
* Test for the chooseReplicaToDelete are processed based on
* block locality and free space
*/
@Test public void testChooseReplicaToDelete() throws Exception {
List replicaList=new ArrayList();
final Map> rackMap=new HashMap>();
dataNodes[0].setRemaining(4 * 1024 * 1024);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3 * 1024 * 1024);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2 * 1024 * 1024);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1 * 1024 * 1024);
replicaList.add(storages[5]);
List first=new ArrayList();
List second=new ArrayList();
replicator.splitNodesWithRack(replicaList,rackMap,first,second);
assertEquals(3,first.size());
assertEquals(1,second.size());
DatanodeStorageInfo chosen=replicator.chooseReplicaToDelete(null,null,(short)3,first,second);
assertEquals(chosen,storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen);
assertEquals(2,first.size());
assertEquals(1,second.size());
chosen=replicator.chooseReplicaToDelete(null,null,(short)2,first,second);
assertEquals(chosen,storages[2]);
replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen);
assertEquals(0,first.size());
assertEquals(2,second.size());
chosen=replicator.chooseReplicaToDelete(null,null,(short)1,first,second);
assertEquals(chosen,storages[5]);
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica but in different
 * node group, and the rest should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
DatanodeStorageInfo[] targets;
BlockPlacementPolicyDefault repl=(BlockPlacementPolicyDefault)replicator;
List chosenNodes=new ArrayList();
// dataNodes[1] is barred from selection throughout.
Set excludedNodes=new HashSet();
excludedNodes.add(dataNodes[1]);
targets=repl.chooseTarget(filename,4,dataNodes[0],chosenNodes,false,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
assertEquals(targets.length,4);
assertEquals(storages[0],targets[0]);
assertTrue(cluster.isNodeGroupAware());
// No other target may share the writer's node group.
for (int i=1; i < 4; i++) {
assertFalse(isOnSameNodeGroup(targets[0],targets[i]));
}
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[1],targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
// With returnChosenNodes=true the result contains the already-chosen
// storages[2] in addition to the newly selected target.
targets=repl.chooseTarget(filename,1,dataNodes[0],chosenNodes,true,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2,targets.length);
// Scan for storages[2] in the returned array; it must be present.
int i=0;
for (; i < targets.length && !storages[2].equals(targets[i]); i++) ;
assertTrue(i < targets.length);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica but in different nodegroup,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test public void testChooseTarget3() throws Exception {
// Report dataNodes[0] as nearly full so the policy must skip it.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// The writer's node is skipped; dataNodes[1] takes its place.
assertEquals(storages[1],targets[0]);
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertEquals(storages[1],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3);
assertEquals(targets.length,3);
assertEquals(storages[1],targets[0]);
assertTrue(isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(4);
assertEquals(targets.length,4);
assertEquals(storages[1],targets[0]);
assertTrue(cluster.isNodeGroupAware());
// Node-group-aware placement: no two targets share a node group.
verifyNoTwoTargetsOnSameNodeGroup(targets);
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
// Restore dataNodes[0]'s reported free space.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica, but
 * in different node group.
 * @throws Exception
 */
@Test public void testChooseTarget4() throws Exception {
// Report all three rack-1 nodes as nearly full so none qualifies.
for (int i=0; i < 3; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
}
DatanodeStorageInfo[] targets;
targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// The writer's rack is disqualified, so targets must be elsewhere.
assertFalse(isOnSameRack(dataNodes[0],targets[0]));
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertFalse(isOnSameRack(dataNodes[0],targets[0]));
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3);
assertEquals(targets.length,3);
for (int i=0; i < 3; i++) {
assertFalse(isOnSameRack(dataNodes[0],targets[i]));
}
// No two targets may share a node group.
verifyNoTwoTargetsOnSameNodeGroup(targets);
assertTrue(isOnSameRack(targets[0],targets[1]) || isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameRack(targets[0],targets[2]));
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test replica placement policy in case of targets more than number of
 * NodeGroups.
 * The 12-nodes cluster only has 6 NodeGroups, but in some cases, like:
 * placing submitted job file, there is requirement to choose more (10)
 * targets for placing replica. We should test it can return 6 targets.
 */
@Test public void testChooseMoreTargetsThanNodeGroups() throws Exception {
// Clear out the regular nodes and any leftover boundary-case nodes.
for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.remove(dataNodes[i]);
}
for (int i=0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
DatanodeDescriptor node=dataNodesInBoundaryCase[i];
if (cluster.contains(node)) {
cluster.remove(node);
}
}
// Install the "more targets" topology (12 nodes, 6 node groups).
for (int i=0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
cluster.add(dataNodesInMoreTargetsCase[i]);
}
// Give every node plenty of free space.
for (int i=0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
DatanodeStorageInfo[] targets;
targets=chooseTarget(3,dataNodesInMoreTargetsCase[0]);
assertEquals(targets.length,3);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
// Asking for 10 targets can only yield 6 — one per node group.
targets=chooseTarget(10,dataNodesInMoreTargetsCase[0]);
assertTrue(checkTargetsOnDifferentNodeGroup(targets));
assertEquals(targets.length,6);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node (and node group)
 * of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test public void testChooseTarget1() throws Exception {
// Report dataNodes[0] with ample space but a non-zero load value (the 4
// below — presumably the xceiver count; confirm against the helper's
// signature). It should still be chosen as the first target.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,4,0);
DatanodeStorageInfo[] targets;
targets=chooseTarget(0);
assertEquals(targets.length,0);
targets=chooseTarget(1);
assertEquals(targets.length,1);
// The writer's own storage is preferred for the first replica.
assertEquals(storages[0],targets[0]);
targets=chooseTarget(2);
assertEquals(targets.length,2);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
targets=chooseTarget(3);
assertEquals(targets.length,3);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
// 2nd and 3rd replicas: same rack but different node groups.
assertTrue(isOnSameRack(targets[1],targets[2]));
assertFalse(isOnSameNodeGroup(targets[1],targets[2]));
targets=chooseTarget(4);
assertEquals(targets.length,4);
assertEquals(storages[0],targets[0]);
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[0],targets[2]));
verifyNoTwoTargetsOnSameNodeGroup(targets);
// Reset dataNodes[0]'s reported load for subsequent tests.
updateHeartbeatWithUsage(dataNodes[0],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
APIUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test for an OS dependent absolute paths.
* @throws IOException
*/
@Test public void testAbsolutePathAsURI() throws IOException {
URI u=null;
// An OS-native absolute Windows path must convert to a file:// URI.
u=Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
// Fix: add the missing separator space before the concatenated path so the
// failure message is readable ("...Windows path C:\..." not "...pathC:\...").
assertNotNull("Uri should not be null for Windows path " + ABSOLUTE_PATH_WINDOWS,u);
assertEquals(URI_FILE_SCHEMA,u.getScheme());
// Same round-trip for a Unix-style absolute path.
u=Util.stringAsURI(ABSOLUTE_PATH_UNIX);
assertNotNull("Uri should not be null for Unix path " + ABSOLUTE_PATH_UNIX,u);
assertEquals(URI_FILE_SCHEMA,u.getScheme());
}
APIUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test for a URI
* @throws IOException
*/
@Test public void testURI() throws IOException {
// Unix-style URI: both scheme and path must survive the conversion.
LOG.info("Testing correct Unix URI: " + URI_UNIX);
URI parsed=Util.stringAsURI(URI_UNIX);
LOG.info("Uri: " + parsed);
assertNotNull("Uri should not be null at this point",parsed);
assertEquals(URI_FILE_SCHEMA,parsed.getScheme());
assertEquals(URI_PATH_UNIX,parsed.getPath());
// Windows-style URI: getPath() returns the decoded form, so any encoded
// spaces in the expected constant are decoded before comparing.
LOG.info("Testing correct windows URI: " + URI_WINDOWS);
parsed=Util.stringAsURI(URI_WINDOWS);
LOG.info("Uri: " + parsed);
assertNotNull("Uri should not be null at this point",parsed);
assertEquals(URI_FILE_SCHEMA,parsed.getScheme());
assertEquals(URI_PATH_WINDOWS.replace("%20"," "),parsed.getPath());
}
InternalCallVerifierEqualityVerifier
/**
 * Restarts the cluster with a large block size, writes a two-block file,
 * sends a block report built with getBlockReports(..., true, true)
 * (presumably corrupting report contents — confirm against the helper),
 * and checks the resulting PendingReplication count.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_09() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
final int DN_N1=DN_N0 + 1;
final int bytesChkSum=1024 * 1000;
// Block size = 6 chunks; the 12-chunk file below therefore spans 2 blocks.
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum);
shutDownCluster();
startUpCluster();
try {
writeFile(METHOD_NAME,12 * bytesChkSum,filePath);
Block bl=findBlock(filePath,12 * bytesChkSum);
// BlockChecker reads the file concurrently while the report is processed.
BlockChecker bc=new BlockChecker(filePath);
bc.start();
waitForTempReplica(bl,DN_N1);
DataNode dn=cluster.getDataNodes().get(DN_N1);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn,poolId,true,true);
sendBlockReports(dnR,poolId,reports);
printStats();
assertEquals("Wrong number of PendingReplication blocks",2,cluster.getNamesystem().getPendingReplicationBlocks());
try {
bc.join();
}
catch ( InterruptedException e) {
// Fix: restore the thread's interrupt status instead of swallowing it.
Thread.currentThread().interrupt();
}
}
finally {
resetConfiguration();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* The test set the configuration parameters for a large block size and
* restarts initiated single-node cluster.
* Then it writes a file > block_size and closes it.
* The second datanode is started in the cluster.
* As soon as the replication process is started and at least one TEMPORARY
* replica is found test forces BlockReport process and checks
* if the TEMPORARY replica isn't reported on it.
* Eventually, the configuration is being restored into the original state.
* @throws IOException in case of an error
*/
@Test(timeout=300000) public void blockReport_08() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
final int DN_N1=DN_N0 + 1;
final int bytesChkSum=1024 * 1000;
// Block size = 6 chunks; the 12-chunk file below therefore spans 2 blocks.
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum);
shutDownCluster();
startUpCluster();
try {
ArrayList blocks=writeFile(METHOD_NAME,12 * bytesChkSum,filePath);
Block bl=findBlock(filePath,12 * bytesChkSum);
// BlockChecker reads the file concurrently while the report is processed.
BlockChecker bc=new BlockChecker(filePath);
bc.start();
// Wait until a TEMPORARY replica of the block appears on the new DN.
waitForTempReplica(bl,DN_N1);
DataNode dn=cluster.getDataNodes().get(DN_N1);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
printStats();
// TEMPORARY replicas must not count as reported: all blocks stay pending.
assertEquals("Wrong number of PendingReplication blocks",blocks.size(),cluster.getNamesystem().getPendingReplicationBlocks());
try {
bc.join();
}
catch ( InterruptedException e) {
// Fix: restore the thread's interrupt status instead of swallowing it.
Thread.currentThread().interrupt();
}
}
finally {
resetConfiguration();
}
}
InternalCallVerifierEqualityVerifier
/**
* Test creates a file and closes it.
* The second datanode is started in the cluster.
* As soon as the replication process is completed test runs
* Block report and checks that no underreplicated blocks are left
* @throws IOException in case of an error
*/
@Test(timeout=300000) public void blockReport_06() throws Exception {
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
final int DN_N1=DN_N0 + 1;
writeFile(METHOD_NAME,FILE_SIZE,filePath);
// Start the second DN and wait for replication to complete.
startDNandWait(filePath,true);
DataNode dn=cluster.getDataNodes().get(DN_N1);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
printStats();
// Fix: the message previously said "PendingReplication Blocks" although the
// value being checked is the under-replicated block count.
assertEquals("Wrong number of UnderReplicated Blocks",0,cluster.getNamesystem().getUnderReplicatedBlocks());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test write a file, verifies and closes it. Then a couple of random blocks
* is removed and BlockReport is forced; the FSNamesystem is pushed to
* recalculate required DN's activities such as replications and so on.
* The number of missing and under-replicated blocks should be the same in
* case of a single-DN cluster.
* @throws IOException in case of errors
*/
@Test(timeout=300000) public void blockReport_02() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
LOG.info("Running test " + METHOD_NAME);
Path filePath=new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
File dataDir=new File(cluster.getDataDirectory());
assertTrue(dataDir.isDirectory());
List blocks2Remove=new ArrayList();
List removedIndex=new ArrayList();
List lBlocks=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_START,FILE_SIZE).getLocatedBlocks();
// Pick exactly two distinct random block indices to delete.
while (removedIndex.size() != 2) {
int newRemoveIndex=rand.nextInt(lBlocks.size());
if (!removedIndex.contains(newRemoveIndex)) removedIndex.add(newRemoveIndex);
}
for ( Integer aRemovedIndex : removedIndex) {
blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + lBlocks.size());
}
final DataNode dn0=cluster.getDataNodes().get(DN_N0);
// Un-finalize each chosen block in the dataset and physically delete its
// on-disk file(s) from the DN's data directory.
for ( ExtendedBlock b : blocks2Remove) {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing the block " + b.getBlockName());
}
for ( File f : findAllFiles(dataDir,new MyFileFilter(b.getBlockName(),true))) {
DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
if (!f.delete()) {
LOG.warn("Couldn't delete " + b.getBlockName());
}
else {
LOG.debug("Deleted file " + f.toString());
}
}
}
// Extra pause — presumably to let the DN notice the removed files before
// the report is built; confirm against waitTil's semantics.
waitTil(DN_RESCAN_EXTRA_WAIT);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn0.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn0,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
// Force the NN to recompute datanode work before reading the counters.
BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem().getBlockManager());
printStats();
// With a single DN, every deleted block is both missing and under-replicated.
assertEquals("Wrong number of MissingBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getMissingBlocksCount());
assertEquals("Wrong number of UnderReplicatedBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getUnderReplicatedBlocks());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
* Test write a file, verifies and closes it. Then the length of the blocks
* are messed up and BlockReport is forced.
* The modification of blocks' length has to be ignored
* @throws java.io.IOException on an error
*/
@Test(timeout=300000) public void blockReport_01() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
ArrayList blocks=prepareForRide(filePath,METHOD_NAME,FILE_SIZE);
if (LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + blocks.size());
}
long[] oldLengths=new long[blocks.size()];
int tempLen;
// Remember each block's true length, then overwrite it with a random
// bogus length before the report is sent.
for (int i=0; i < blocks.size(); i++) {
Block b=blocks.get(i);
if (LOG.isDebugEnabled()) {
LOG.debug("Block " + b.getBlockName() + " before\t"+ "Size "+ b.getNumBytes());
}
oldLengths[i]=b.getNumBytes();
if (LOG.isDebugEnabled()) {
LOG.debug("Setting new length");
}
tempLen=rand.nextInt(BLOCK_SIZE);
b.set(b.getBlockId(),tempLen,b.getGenerationStamp());
if (LOG.isDebugEnabled()) {
LOG.debug("Block " + b.getBlockName() + " after\t "+ "Size "+ b.getNumBytes());
}
}
DataNode dn=cluster.getDataNodes().get(DN_N0);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
List blocksAfterReport=DFSTestUtil.getAllBlocks(fs.open(filePath));
if (LOG.isDebugEnabled()) {
LOG.debug("After mods: Number of blocks allocated " + blocksAfterReport.size());
}
// The NN must have ignored the tampered lengths: every block's length as
// seen through the NN is still the original one.
for (int i=0; i < blocksAfterReport.size(); i++) {
ExtendedBlock b=blocksAfterReport.get(i).getBlock();
assertEquals("Length of " + i + "th block is incorrect",oldLengths[i],b.getNumBytes());
}
}
InternalCallVerifierEqualityVerifier
/**
* Test for the case where one of the DNs in the pipeline is in the
* process of doing a block report exactly when the block is closed.
* In this case, the block report becomes delayed until after the
* block is marked completed on the NN, and hence it reports an RBW
* replica for a COMPLETE block. Such a report should not be marked
* corrupt.
* This is a regression test for HDFS-2791.
*/
@Test(timeout=300000) public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception {
// Latch released once the delayed block report has actually been delivered.
final CountDownLatch brFinished=new CountDownLatch(1);
DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG){
@Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
try {
return super.passThrough(invocation);
}
finally {
// Signal completion regardless of whether the report call threw.
brFinished.countDown();
}
}
}
;
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
REPL_FACTOR=2;
startDNandWait(null,false);
NameNode nn=cluster.getNameNode();
FSDataOutputStream out=fs.create(filePath,REPL_FACTOR);
try {
// Write and hflush so the block is RBW on the DNs but not yet complete.
AppendTestUtil.write(out,0,10);
out.hflush();
DataNode dn=cluster.getDataNodes().get(0);
// Intercept DN->NN traffic and delay the block report RPC.
DatanodeProtocolClientSideTranslatorPB spy=DataNodeTestUtils.spyOnBposToNN(dn,nn);
Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.anyObject(),Mockito.anyString(),Mockito.anyObject());
dn.scheduleAllBlockReport(0);
delayer.waitForCall();
}
finally {
// Closing the stream completes the block on the NN while the report
// is still held back — the scenario under test (HDFS-2791).
IOUtils.closeStream(out);
}
// Release the delayed report and wait for it to be processed.
delayer.proceed();
brFinished.await();
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
// The late RBW report must not mark the completed block as corrupt.
assertEquals(0,nn.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs,filePath);
// Read again with one DN stopped to prove the other replica is usable.
cluster.stopDataNode(1);
DFSTestUtil.readFile(fs,filePath);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Test datanode block pool initialization error handling.
* Failure in initializing a block pool should not cause NPE.
*/
@Test public void testBPInitErrorHandling() throws Exception {
final DataNode mockDn=Mockito.mock(DataNode.class);
Mockito.doReturn(true).when(mockDn).shouldRun();
Configuration conf=new Configuration();
File dnDataDir=new File(new File(TEST_BUILD_DATA,"testBPInitErrorHandling"),"data");
conf.set(DFS_DATANODE_DATA_DIR_KEY,dnDataDir.toURI().toString());
Mockito.doReturn(conf).when(mockDn).getConf();
Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
Mockito.doReturn(DataNodeMetrics.create(conf,"fake dn")).when(mockDn).getMetrics();
final AtomicInteger count=new AtomicInteger();
// Fail the first initBlockPool call only; subsequent calls succeed and
// install the mock dataset, so the BPOS must recover from the failure.
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
if (count.getAndIncrement() == 0) {
throw new IOException("faked initBlockPool exception");
}
Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
return null;
}
}
).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
BPOfferService bpos=setupBPOSForNNs(mockDn,mockNN1,mockNN2);
List actors=bpos.getBPServiceActors();
// One actor per mocked NameNode.
assertEquals(2,actors.size());
bpos.start();
try {
// Despite the injected failure, initialization and block reports to
// both NNs must eventually complete without an NPE.
waitForInitialization(bpos);
waitForBlockReport(mockNN1,mockNN2);
}
finally {
bpos.stop();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Test that the BPOS can register to talk to two different NNs,
* sends block reports to both, etc.
*/
@Test public void testBasicFunctionality() throws Exception {
BPOfferService bpos=setupBPOSForNNs(mockNN1,mockNN2);
bpos.start();
try {
waitForInitialization(bpos);
// The BPOS must register the datanode with both NameNodes.
Mockito.verify(mockNN1).registerDatanode(Mockito.any(DatanodeRegistration.class));
Mockito.verify(mockNN2).registerDatanode(Mockito.any(DatanodeRegistration.class));
// ...and send a block report to each.
waitForBlockReport(mockNN1);
waitForBlockReport(mockNN2);
// An incremental "block received" notification must also reach both NNs.
bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK,"","");
ReceivedDeletedBlockInfo[] ret=waitForBlockReceived(FAKE_BLOCK,mockNN1);
assertEquals(1,ret.length);
assertEquals(FAKE_BLOCK.getLocalBlock(),ret[0].getBlock());
ret=waitForBlockReceived(FAKE_BLOCK,mockNN2);
assertEquals(1,ret.length);
assertEquals(FAKE_BLOCK.getLocalBlock(),ret[0].getBlock());
}
finally {
bpos.stop();
}
}
InternalCallVerifierBooleanVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
* Test that individual volume failures do not cause DNs to fail, that
* all volumes failed on a single datanode do cause it to fail, and
* that the capacities and liveliness is adjusted correctly in the NN.
*/
@Test public void testSuccessiveVolumeFailures() throws Exception {
// Volume failures are simulated via chmod, which does not work on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
Thread.sleep(WAIT_FOR_HEARTBEATS);
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
final long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
// Each DN has two data dirs: DN i owns data(2i+1) and data(2i+2).
File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
File dn3Vol1=new File(dataDir,"data" + (2 * 2 + 1));
File dn3Vol2=new File(dataDir,"data" + (2 * 2 + 2));
// Fail one volume on DN1 and DN2 by removing the execute bit.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
Path file1=new Path("/test1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file1,(short)3);
ArrayList dns=cluster.getDataNodes();
// A single failed volume must not take down any DN.
assertTrue("DN1 should be up",dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up",dns.get(1).isDatanodeUp());
assertTrue("DN3 should be up",dns.get(2).isDatanodeUp());
assertCounter("VolumeFailures",1L,getMetrics(dns.get(0).getMetrics().name()));
assertCounter("VolumeFailures",1L,getMetrics(dns.get(1).getMetrics().name()));
assertCounter("VolumeFailures",0L,getMetrics(dns.get(2).getMetrics().name()));
// Sanity: the status wait below must be able to observe several heartbeats.
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Now fail one volume on DN3 as well.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,false));
Path file2=new Path("/test2");
DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file2,(short)3);
assertTrue("DN3 should still be up",dns.get(2).isDatanodeUp());
assertCounter("VolumeFailures",1L,getMetrics(dns.get(2).getMetrics().name()));
ArrayList live=new ArrayList();
ArrayList dead=new ArrayList();
// NOTE(review): fetchDatanodes is called, the lists cleared, then called
// again — the first call looks redundant; confirm whether it is needed.
dm.fetchDatanodes(live,dead,false);
live.clear();
dead.clear();
dm.fetchDatanodes(live,dead,false);
assertEquals("DN3 should have 1 failed volume",1,live.get(2).getVolumeFailures());
dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
DFSTestUtil.waitForDatanodeStatus(dm,3,0,3,origCapacity - (3 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Failing DN3's last remaining volume must take the whole DN down.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,false));
Path file3=new Path("/test3");
DFSTestUtil.createFile(fs,file3,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file3,(short)2);
DFSTestUtil.waitForDatanodeDeath(dns.get(2));
assertCounter("VolumeFailures",2L,getMetrics(dns.get(2).getMetrics().name()));
DFSTestUtil.waitForDatanodeStatus(dm,2,1,2,origCapacity - (4 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Restore all volumes and restart: the full capacity must come back.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,true));
cluster.restartDataNodes();
cluster.waitActive();
Path file4=new Path("/test4");
DFSTestUtil.createFile(fs,file4,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file4,(short)3);
DFSTestUtil.waitForDatanodeStatus(dm,3,0,0,origCapacity,WAIT_FOR_HEARTBEATS);
}
InternalCallVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
* Test that a volume that is considered failed on startup is seen as
* a failed volume by the NN.
*/
@Test public void testFailedVolumeOnStartupIsCounted() throws Exception {
// Volume failure is simulated via chmod, which does not work on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
File dir=new File(cluster.getInstanceStorageDir(0,0),"current");
try {
prepareDirToFail(dir);
restartDatanodes(1,false);
// Fix: use assertTrue for a boolean condition instead of
// assertEquals(true, ...), and give the failure a descriptive message.
assertTrue("BP service should be alive despite the failed volume",cluster.getDataNodes().get(0).isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
// The NN should see 1 live node with 1 failed volume and half capacity.
DFSTestUtil.waitForDatanodeStatus(dm,1,0,1,origCapacity / 2,WAIT_FOR_HEARTBEATS);
}
finally {
// Restore permissions so later tests can reuse the directory.
FileUtil.chmod(dir.toString(),"755");
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * When every configured storage location is NON_EXISTENT,
 * recoverTransitionRead must throw and leave no storage dirs registered.
 */
@Test public void testRecoverTransitionReadFailure() throws IOException {
final int locationCount=3;
// All locations are created in the "does not exist" state.
List missingLocations=createStorageLocations(locationCount,true);
try {
storage.recoverTransitionRead(mockDN,nsInfo,missingLocations,START_OPT);
fail("An IOException should throw: all StorageLocations are NON_EXISTENT");
}
catch ( IOException expected) {
GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",expected);
}
// Nothing may have been registered on the failure path.
assertEquals(0,storage.getNumStorageDirs());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* This test enforces the behavior that if there is an exception from
* doTransition() during DN starts up, the storage directories that have
* already been processed are still visible, i.e., in
* DataStorage.storageDirs().
*/
@Test public void testRecoverTransitionReadDoTransitionFailure() throws IOException {
final int numLocations=3;
List locations=createStorageLocations(numLocations);
String bpid=nsInfo.getBlockPoolID();
// First pass succeeds and initializes all locations, then release locks
// so a fresh DataStorage can reuse them.
storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
storage.unlockAll();
storage=new DataStorage();
// Change the cluster ID so doTransition() fails on every location.
nsInfo.clusterID="cluster1";
try {
storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
fail("Expect to throw an exception from doTransition()");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Incompatible clusterIDs",e);
}
// Already-processed directories must remain visible despite the failure.
assertEquals(numLocations,storage.getNumStorageDirs());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test public void testAddStorageDirectories() throws IOException, URISyntaxException {
final int numLocations=3;
final int numNamespace=3;
List locations=createStorageLocations(numLocations);
List namespaceInfos=createNamespaceInfos(numNamespace);
// Adding the same locations for several namespaces must create a
// per-block-pool subdirectory under each location.
for ( NamespaceInfo ni : namespaceInfos) {
storage.addStorageLocations(mockDN,ni,locations,START_OPT);
for ( StorageLocation sl : locations) {
checkDir(sl.getFile());
checkDir(sl.getFile(),ni.getBlockPoolID());
}
}
assertEquals(numLocations,storage.getNumStorageDirs());
// Re-adding locations that are already active must fail...
locations=createStorageLocations(numLocations);
try {
storage.addStorageLocations(mockDN,namespaceInfos.get(0),locations,START_OPT);
fail("Expected to throw IOException: adding active directories.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e);
}
// ...and must not change the registered directory count.
assertEquals(numLocations,storage.getNumStorageDirs());
// A fresh, larger set of locations can still be added successfully.
locations=createStorageLocations(6);
storage.addStorageLocations(mockDN,nsInfo,locations,START_OPT);
assertEquals(6,storage.getNumStorageDirs());
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * A layout-version mismatch between the DN and the NN must not prevent
 * the actor from retrieving namespace info.
 */
@Test public void testDifferentLayoutVersions() throws Exception {
// Sanity check: with the stock mock, the reported version matches.
final int reportedLayout=actor.retrieveNamespaceInfo().getLayoutVersion();
assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,reportedLayout);
// Make the NN report a wildly different layout version.
doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo).getLayoutVersion();
try {
actor.retrieveNamespaceInfo();
}
catch ( IOException unexpected) {
fail("Should not fail to retrieve NS info from DN with different layout version");
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test public void testSoftwareVersionDifferences() throws Exception {
// Sanity check: the mock reports the build's own software version.
assertEquals(VersionInfo.getVersion(),actor.retrieveNamespaceInfo().getSoftwareVersion());
// An NN newer than the DN's minimum requirement is acceptable.
doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
assertEquals("4.0.0",actor.retrieveNamespaceInfo().getSoftwareVersion());
// An NN older than the DN's minimum requirement must be rejected.
doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
try {
actor.retrieveNamespaceInfo();
fail("Should have thrown an exception for NN with too-low version");
}
catch ( IncorrectVersionException ive) {
GenericTestUtils.assertExceptionContains("The reported NameNode version is too low",ive);
LOG.info("Got expected exception",ive);
}
}
InternalCallVerifierEqualityVerifier
/**
* Test that when there is a failure replicating a block the temporary
* and meta files are cleaned up and subsequent replication succeeds.
*/
@Test public void testReplicationError() throws Exception {
final Path fileName=new Path("/test.txt");
final int fileLen=1;
DFSTestUtil.createFile(fs,fileName,1,(short)1,1L);
DFSTestUtil.waitReplication(fs,fileName,(short)1);
LocatedBlocks blocks=NameNodeAdapter.getBlockLocations(cluster.getNameNode(),fileName.toString(),0,(long)fileLen);
assertEquals("Should only find 1 block",blocks.locatedBlockCount(),1);
LocatedBlock block=blocks.get(0);
// Bring up a second DN that will be the target of the failed transfer.
cluster.startDataNodes(conf,1,true,null,null);
cluster.waitActive();
final int sndNode=1;
DataNode datanode=cluster.getDataNodes().get(sndNode);
// Open a raw socket to the DN and send a malformed writeBlock request
// (empty pipeline) so block creation fails mid-way, leaving temp files.
InetSocketAddress target=datanode.getXferAddress();
Socket s=new Socket(target.getAddress(),target.getPort());
DataOutputStream out=new DataOutputStream(s.getOutputStream());
DataChecksum checksum=DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,512);
new Sender(out).writeBlock(block.getBlock(),StorageType.DEFAULT,BlockTokenSecretManager.DUMMY_TOKEN,"",new DatanodeInfo[0],new StorageType[0],null,BlockConstructionStage.PIPELINE_SETUP_CREATE,1,0L,0L,0L,checksum,CachingStrategy.newDefaultStrategy());
out.flush();
// Closing the stream also closes the underlying socket.
out.close();
String bpid=cluster.getNamesystem().getBlockPoolId();
File storageDir=cluster.getInstanceStorageDir(sndNode,0);
File dir1=MiniDFSCluster.getRbwDir(storageDir,bpid);
storageDir=cluster.getInstanceStorageDir(sndNode,1);
File dir2=MiniDFSCluster.getRbwDir(storageDir,bpid);
// Wait until the DN cleans the leftover temp/RBW files.
// NOTE(review): unbounded poll loop relying on the test framework's
// timeout; listFiles() could also return null if a dir vanishes — confirm.
while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
Thread.sleep(100);
}
// After cleanup, a normal re-replication to the second DN must succeed.
fs.setReplication(fileName,(short)2);
DFSTestUtil.waitReplication(fs,fileName,(short)1);
fs.delete(fileName,false);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Check that the permissions of the local DN directories are as expected.
*/
@Test public void testLocalDirs() throws Exception {
Configuration config=new Configuration();
// The expected permission comes straight from the default configuration.
final String configuredPerm=config.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
FsPermission expected=new FsPermission(configuredPerm);
FileSystem localFS=FileSystem.getLocal(config);
// Every volume of every DN must carry the configured permission bits.
for ( DataNode node : cluster.getDataNodes()) {
for ( FsVolumeSpi volume : node.getFSDataset().getVolumes()) {
Path dataDir=new Path(volume.getBasePath());
FsPermission actual=localFS.getFileStatus(dataDir).getPermission();
assertEquals("Permission for dir: " + dataDir + ", is "+ actual+ ", while expected is "+ expected,expected,actual);
}
}
}
TestInitializerInternalCallVerifierEqualityVerifierHybridVerifier
@Before public void setUp() throws IOException {
// Build an FsDatasetImpl backed by a mocked DataNode and DataStorage with
// NUM_INIT_VOLUMES pre-created storage directories.
final DataNode datanode=Mockito.mock(DataNode.class);
storage=Mockito.mock(DataStorage.class);
Configuration conf=new Configuration();
final DNConf dnConf=new DNConf(conf);
when(datanode.getConf()).thenReturn(conf);
when(datanode.getDnConf()).thenReturn(dnConf);
createStorageDirs(storage,conf,NUM_INIT_VOLUMES);
dataset=new FsDatasetImpl(datanode,storage,conf);
// Sanity: all initial volumes are present and none has failed.
assertEquals(NUM_INIT_VOLUMES,dataset.getVolumes().size());
assertEquals(0,dataset.getNumFailedVolumes());
}
IterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Verifies that addVolumes registers new storage locations and that each
 * new volume's base path matches the location it was created from.
 */
@Test public void testAddVolumes() throws IOException {
final int addedVolumes=3;
final int existingVolumes=dataset.getVolumes().size();
final int expectedTotal=addedVolumes + existingVolumes;
List freshLocations=new ArrayList();
// Stub one storage directory per new location at the expected index.
for (int idx=0; idx < addedVolumes; idx++) {
String volumePath=BASE_DIR + "/newData" + idx;
freshLocations.add(StorageLocation.parse(volumePath));
when(storage.getStorageDir(existingVolumes + idx)).thenReturn(new Storage.StorageDirectory(new File(volumePath)));
}
when(storage.getNumStorageDirs()).thenReturn(expectedTotal);
dataset.addVolumes(freshLocations);
assertEquals(expectedTotal,dataset.getVolumes().size());
// New volumes are appended after the pre-existing ones, in order.
for (int idx=0; idx < addedVolumes; idx++) {
assertEquals(freshLocations.get(idx).getFile().getPath(),dataset.getVolumes().get(existingVolumes + idx).getBasePath());
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test for {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)}
*/
@Test public void testUpdateReplicaUnderRecovery() throws IOException {
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String bpid=cluster.getNamesystem().getBlockPoolId();
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,1024L,(short)3,0L);
final LocatedBlock locatedblock=getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),filestr);
final DatanodeInfo[] datanodeinfo=locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);
final DataNode datanode=cluster.getDataNode(datanodeinfo[0].getIpcPort());
Assert.assertTrue(datanode != null);
final ExtendedBlock b=locatedblock.getBlock();
final long recoveryid=b.getGenerationStamp() + 1;
final long newlength=b.getNumBytes() - 1;
final FsDatasetSpi> fsdataset=DataNodeTestUtils.getFSDataset(datanode);
final ReplicaRecoveryInfo rri=fsdataset.initReplicaRecovery(new RecoveringBlock(b,null,recoveryid));
final ReplicaInfo replica=FsDatasetTestUtil.fetchReplicaInfo(fsdataset,bpid,b.getBlockId());
Assert.assertEquals(ReplicaState.RUR,replica.getState());
FsDatasetImpl.checkReplicaFiles(replica);
{
final ExtendedBlock tmp=new ExtendedBlock(b.getBlockPoolId(),rri.getBlockId(),rri.getNumBytes() - 1,rri.getGenerationStamp());
try {
fsdataset.updateReplicaUnderRecovery(tmp,recoveryid,newlength);
Assert.fail();
}
catch ( IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
final String storageID=fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(),rri),recoveryid,newlength);
assertTrue(storageID != null);
}
finally {
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test {@link FsDatasetImpl#initReplicaRecovery(String,ReplicaMap,Block,long,long)}
*/
@Test public void testInitReplicaRecovery() throws IOException {
final long firstblockid=10000L;
final long gs=7777L;
final long length=22L;
final ReplicaMap map=new ReplicaMap(this);
String bpid="BP-TEST";
final Block[] blocks=new Block[5];
// Populate the map with a small run of finalized-looking replicas.
for (int i=0; i < blocks.length; i++) {
blocks[i]=new Block(firstblockid + i,length,gs);
map.add(bpid,createReplicaInfo(blocks[i]));
}
{
// Normal case: recovery with a newer gs converts the replica to RUR.
final Block b=blocks[0];
final ReplicaInfo originalInfo=map.get(bpid,b);
final long recoveryid=gs + 1;
final ReplicaRecoveryInfo recoveryInfo=FsDatasetImpl.initReplicaRecovery(bpid,map,blocks[0],recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
assertEquals(originalInfo,recoveryInfo);
final ReplicaUnderRecovery updatedInfo=(ReplicaUnderRecovery)map.get(bpid,b);
Assert.assertEquals(originalInfo.getBlockId(),updatedInfo.getBlockId());
Assert.assertEquals(recoveryid,updatedInfo.getRecoveryID());
// A second recovery with an even newer id supersedes the first.
final long recoveryid2=gs + 2;
final ReplicaRecoveryInfo recoveryInfo2=FsDatasetImpl.initReplicaRecovery(bpid,map,blocks[0],recoveryid2,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
assertEquals(originalInfo,recoveryInfo2);
final ReplicaUnderRecovery updatedInfo2=(ReplicaUnderRecovery)map.get(bpid,b);
Assert.assertEquals(originalInfo.getBlockId(),updatedInfo2.getBlockId());
Assert.assertEquals(recoveryid2,updatedInfo2.getRecoveryID());
// Retrying with the stale (older) recovery id must fail.
try {
FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
Assert.fail();
}
catch ( RecoveryInProgressException ripe) {
System.out.println("GOOD: getting " + ripe);
}
}
{
// Recovering a block the DN does not have returns null.
final long recoveryid=gs + 1;
final Block b=new Block(firstblockid - 1,length,gs);
ReplicaRecoveryInfo r=FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
Assert.assertNull("Data-node should not have this replica.",r);
}
{
// A recovery id not newer than the replica's gs must be rejected.
final long recoveryid=gs - 1;
final Block b=new Block(firstblockid + 1,length,gs);
try {
FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
Assert.fail();
}
catch ( IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
{
// The block's gs being newer than the stored replica's gs must fail too.
final long recoveryid=gs + 1;
final Block b=new Block(firstblockid,length,gs + 1);
try {
FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
fail("InitReplicaRecovery should fail because replica's " + "gs is less than the block's gs");
}
catch ( IOException e) {
// Fix: the startsWith() result was computed and discarded, so the
// message was never actually verified; assert it explicitly.
Assert.assertTrue("Unexpected exception message: " + e.getMessage(),e.getMessage().startsWith("replica.getGenerationStamp() < block.getGenerationStamp(), block="));
}
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests for setting xattr
 * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
 * 2. Set xattr with illegal name.
 * 3. Set xattr without XAttrSetFlag.
 * 4. Set xattr and total number exceeds max limit.
 * 5. Set xattr and name is too long.
 * 6. Set xattr and value is too long.
 */
@Test(timeout=120000)
public void testSetXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));

  // CREATE|REPLACE on a fresh name behaves like a plain create.
  fs.setXAttr(path, name1, value1,
      EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);

  // A null name is rejected; depending on client/RPC path the error surfaces
  // as an NPE or a RemoteException.
  try {
    fs.setXAttr(path, null, value1,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with null name should fail.");
  } catch (NullPointerException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
  } catch (RemoteException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
  }

  // A bare namespace prefix ("user.") leaves an empty name component.
  try {
    fs.setXAttr(path, "user.", value1,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with empty name should fail.");
  } catch (RemoteException e) {
    // JUnit convention: expected value first, actual second.
    assertEquals("Unexpected RemoteException: " + e,
        HadoopIllegalArgumentException.class.getCanonicalName(),
        e.getClassName());
    GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
  }

  // A name without a recognized namespace prefix is rejected.
  try {
    fs.setXAttr(path, "a1", value1,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with invalid name prefix or without "
        + "name prefix should fail.");
  } catch (RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e,
        HadoopIllegalArgumentException.class.getCanonicalName(),
        e.getClassName());
    GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
  }

  // Setting without any XAttrSetFlag succeeds on a fresh name.
  fs.setXAttr(path, name1, value1);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);

  // CREATE|REPLACE on an existing name behaves like a replace.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name1, newValue1,
      EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  fs.removeXAttr(path, name1);

  // Adding a fourth xattr exceeds the configured per-inode limit.
  fs.setXAttr(path, name1, value1);
  fs.setXAttr(path, name2, value2);
  fs.setXAttr(path, name3, null);
  try {
    fs.setXAttr(path, name4, null);
    Assert.fail("Setting xattr should fail if total number of xattrs "
        + "for inode exceeds max limit.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e);
  }
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  fs.removeXAttr(path, name3);

  // Name alone pushing the xattr size over the limit.
  String longName = "user.0123456789abcdefX";
  try {
    fs.setXAttr(path, longName, null);
    Assert.fail("Setting xattr should fail if name is too long.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("XAttr is too big", e);
    GenericTestUtils.assertExceptionContains("total size is 17", e);
  }

  // Value alone pushing the xattr size over the limit.
  byte[] longValue = new byte[MAX_SIZE];
  try {
    fs.setXAttr(path, "user.a", longValue);
    Assert.fail("Setting xattr should fail if value is too long.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("XAttr is too big", e);
    GenericTestUtils.assertExceptionContains("total size is 17", e);
  }

  // Name + value exactly at the limit is accepted.
  String name = "user.111";
  byte[] value = new byte[MAX_SIZE - 3];
  fs.setXAttr(path, name, value);
}
InternalCallVerifierEqualityVerifier
/**
 * Steps:
 * 1) Set xattrs on a file.
 * 2) Remove xattrs from that file.
 * 3) Save a checkpoint and restart NN.
 * 4) Set xattrs again on the same file.
 * 5) Remove xattrs from that file.
 * 6) Restart NN without saving a checkpoint.
 * 7) Set xattrs again on the same file.
 */
@Test(timeout=120000)
public void testCleanupXAttrs() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);

  // Restart with a saved checkpoint, then repeat the set/remove cycle.
  restart(true);
  initFileSystem();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);

  // Restart without a checkpoint (edit-log replay), then repeat again.
  restart(false);
  initFileSystem();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);

  // After removal, CREATE must work again and only these two must remain.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests for replacing xattr
 * 1. Replace an xattr using XAttrSetFlag.REPLACE.
 * 2. Replace an xattr which doesn't exist and expect an exception.
 * 3. Create multiple xattrs and replace some.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000)
public void testReplaceXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  fs.removeXAttr(path, name1);

  // REPLACE on a missing xattr must fail.
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
    Assert.fail("Replacing xattr which does not exist should fail.");
  } catch (IOException e) {
    // expected
  }

  // Replacing with a null value stores an empty byte array.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  // Replaced values must survive an edit-log replay...
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  // ...and a restart with a saved checkpoint.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * removexattr tests. Test that removexattr throws an exception if any of
 * the following are true:
 * an xattr that was requested doesn't exist
 * the caller specifies an unknown namespace
 * the caller doesn't have access to the namespace
 * the caller doesn't have permission to get the value of the xattr
 * the caller does not have "execute" (scan) access to the parent directory
 * the caller has only read access to the owning directory
 * the caller has only execute access to the owning directory and execute
 * access to the actual entity
 * the caller does not have execute access to the owning directory and write
 * access to the actual entity
 */
@Test(timeout=120000)
public void testRemoveXAttrPermissions() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));

  // Removing the same xattr twice: the second removal targets a missing attr.
  try {
    fs.removeXAttr(path, name2);
    fs.removeXAttr(path, name2);
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No matching attributes found", e);
  }

  // An unknown namespace prefix is rejected, locally or over RPC.
  final String expectedExceptionString = "An XAttr name must be prefixed "
      + "with user/trusted/security/system/raw, followed by a '.'";
  try {
    fs.removeXAttr(path, "wackynamespace.foo");
    Assert.fail("expected IOException");
  } catch (RemoteException e) {
    // JUnit convention: expected value first, actual second.
    assertEquals("Unexpected RemoteException: " + e,
        HadoopIllegalArgumentException.class.getCanonicalName(),
        e.getClassName());
    GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
  }

  // An ordinary user cannot remove an xattr in the "trusted" namespace.
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});
  fs.setXAttr(path, "trusted.foo", "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(path, "trusted.foo");
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
  } finally {
    fs.removeXAttr(path, "trusted.foo");
  }

  // No permission bits at all for "user" on the owning directory (0700).
  fs.setPermission(path, new FsPermission((short) 0700));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(path, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }

  // No "execute" (scan) access to the parent directory of the entity.
  final Path childDir = new Path(path, "child" + pathCount);
  FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
  fs.setXAttr(childDir, name1, "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }

  // Read-only access to the parent directory (0704) is still insufficient.
  fs.setPermission(path, new FsPermission((short) 0704));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }

  // Execute-only on both parent and entity (0701/0701) is insufficient.
  fs.setPermission(path, new FsPermission((short) 0701));
  fs.setPermission(childDir, new FsPermission((short) 0701));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }

  // Execute on the parent plus read/write on the entity (0701/0706) succeeds.
  fs.setPermission(path, new FsPermission((short) 0701));
  fs.setPermission(childDir, new FsPermission((short) 0706));
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      userFs.removeXAttr(childDir, name1);
      return null;
    }
  });
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests for creating xattr
 * 1. Create an xattr using XAttrSetFlag.CREATE.
 * 2. Create an xattr which already exists and expect an exception.
 * 3. Create multiple xattrs.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000)
public void testCreateXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(0, xattrs.size());

  // CREATE on an already-existing xattr must fail.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    Assert.fail("Creating xattr which already exists should fail.");
  } catch (IOException e) {
    // expected
  }
  fs.removeXAttr(path, name1);

  // Creating with a null value stores an empty byte array.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  // Created xattrs must survive an edit-log replay...
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));

  // ...and a restart with a saved checkpoint.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests xattrs in the "raw" namespace: set/get/replace/list through the raw
 * path, invisibility of raw xattrs when listing through the regular path,
 * and denial of all raw-xattr operations for a non-privileged user.
 */
@Test(timeout=120000)
public void testRawXAttrs() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(rawPath, raw1, value1,
      EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  {
    // getXAttr on the raw path returns the value just set.
    final byte[] value = fs.getXAttr(rawPath, raw1);
    Assert.assertArrayEquals(value1, value);
  }
  {
    final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
    Assert.assertEquals(1, xattrs.size());
    Assert.assertArrayEquals(value1, xattrs.get(raw1));
    fs.removeXAttr(rawPath, raw1);
  }
  {
    // CREATE|REPLACE acts as a replace when the raw xattr already exists.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw1, newValue1,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
    Assert.assertEquals(1, xattrs.size());
    Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
    fs.removeXAttr(rawPath, raw1);
  }
  {
    // listXAttrs on the raw path sees both raw xattrs.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    final List<String> xattrNames = fs.listXAttrs(rawPath);
    assertTrue(xattrNames.contains(raw1));
    assertTrue(xattrNames.contains(raw2));
    assertEquals(2, xattrNames.size());
    fs.removeXAttr(rawPath, raw1);
    fs.removeXAttr(rawPath, raw2);
  }
  {
    // Raw xattrs are invisible when listing through the regular path.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    final List<String> xattrNames = fs.listXAttrs(path);
    assertEquals(0, xattrNames.size());
    fs.removeXAttr(rawPath, raw1);
    fs.removeXAttr(rawPath, raw2);
  }
  {
    // A non-privileged user can neither set nor get raw xattrs, via either
    // the regular or the raw path.
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        try {
          userFs.setXAttr(path, raw1, value1);
          fail("setXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.setXAttr(rawPath, raw1, value1);
          fail("setXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttrs(rawPath);
          fail("getXAttrs should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttrs(path);
          fail("getXAttrs should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(rawPath, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(path, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        return null;
      }
    });
  }
  {
    // Even with a raw xattr present, a non-privileged user cannot read it or
    // list the raw path; listing the regular path yields no raw names.
    fs.setXAttr(rawPath, raw1, value1);
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        try {
          userFs.getXAttr(rawPath, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(path, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        final List<String> xattrNames = userFs.listXAttrs(path);
        assertEquals(0, xattrNames.size());
        try {
          userFs.listXAttrs(rawPath);
          fail("listXAttrs on raw path should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        return null;
      }
    });
    fs.removeXAttr(rawPath, raw1);
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that ACL entries gate xattr access: without an ACL grant Diana
 * can neither read nor modify xattrs on Bruce's file; READ allows getXAttrs
 * only; ALL also allows set/remove.
 */
@Test(timeout=120000)
public void testXAttrAcl() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setOwner(path, BRUCE.getUserName(), null);
  FileSystem fsAsBruce = createFileSystem(BRUCE);
  FileSystem fsAsDiana = createFileSystem(DIANA);
  fsAsBruce.setXAttr(path, name1, value1);
  Map<String, byte[]> xattrs;
  try {
    xattrs = fsAsDiana.getXAttrs(path);
    Assert.fail("Diana should not have read access to get xattrs");
  } catch (AccessControlException e) {
    // expected
  }

  // Grant Diana READ; she can now get xattrs but still not modify them.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
  xattrs = fsAsDiana.getXAttrs(path);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  try {
    fsAsDiana.removeXAttr(path, name1);
    Assert.fail("Diana should not have write access to remove xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  try {
    fsAsDiana.setXAttr(path, name2, value2);
    Assert.fail("Diana should not have write access to set xattrs");
  } catch (AccessControlException e) {
    // expected
  }

  // With ALL, Diana can set and remove xattrs.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
  fsAsDiana.setXAttr(path, name2, value2);
  Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
  fsAsDiana.removeXAttr(path, name1);
  fsAsDiana.removeXAttr(path, name2);
}
EqualityVerifier
/**
 * Filtering out a named-user entry must recalculate the access mask from the
 * remaining group-class entries.
 */
@Test
public void testFilterAclEntriesByAclSpecAccessMaskCalculated()
    throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ))
      .add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, READ_WRITE))
      .add(aclEntry(ACCESS, OTHER, READ))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana"));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .build();
  assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
EqualityVerifier
/**
 * Merging default entries without a default group entry must copy the access
 * group entry into the default scope automatically.
 */
@Test
public void testMergeAclEntriesAutomaticDefaultGroup() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, READ));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, READ_EXECUTE))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, OTHER, READ))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Replacing only access entries must leave the existing default entries,
 * including the default mask, untouched.
 */
@Test
public void testReplaceAclEntriesDefaultMaskPreserved() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ))
      .add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, READ_WRITE))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "diana", ALL))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, USER, "diana", READ_WRITE),
      aclEntry(ACCESS, GROUP, ALL),
      aclEntry(ACCESS, OTHER, READ));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ))
      .add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "diana", ALL))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Filtering named user and group entries removes exactly those entries and
 * recalculates the mask from the survivors.
 */
@Test
public void testFilterAclEntriesByAclSpec() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
      .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "execs", READ_WRITE))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, READ))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana"),
      aclEntry(ACCESS, GROUP, "sales"));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, GROUP, "execs", READ_WRITE))
      .add(aclEntry(ACCESS, MASK, READ_WRITE))
      .add(aclEntry(ACCESS, OTHER, READ))
      .build();
  assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
EqualityVerifier
/**
 * Filtering a default named-user entry must recalculate the default mask
 * from the remaining default group-class entries.
 */
@Test
public void testFilterAclEntriesByAclSpecDefaultMaskCalculated()
    throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ_WRITE))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "diana"));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
EqualityVerifier
/**
 * An explicitly provided access mask in the spec must be used verbatim
 * instead of being recalculated.
 */
@Test
public void testMergeAclEntriesProvidedAccessMask() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, MASK, ALL));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Replacing with a full access + default spec yields exactly the entries in
 * the spec, in canonical order.
 */
@Test
public void testReplaceAclEntries() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", ALL))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "sales", ALL),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ_WRITE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, "sales", ALL),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "sales", ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
      .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
      .add(aclEntry(DEFAULT, GROUP, "sales", ALL))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Merging an access-scope entry must recalculate the access mask but leave
 * the existing default mask untouched.
 */
@Test
public void testMergeAclEntriesDefaultMaskPreserved() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "diana", ALL))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  // READ_EXECUTE is statically imported, matching the rest of this file.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, READ_EXECUTE))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "diana", ALL))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Replacing with only a default named-user entry must auto-fill the default
 * user/group/mask/other entries from the access scope.
 */
@Test
public void testReplaceAclEntriesOnlyDefaults() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce", READ));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Merging default named-user entries must recalculate the default mask as
 * the union of the default group-class permissions.
 */
@Test
public void testMergeAclEntriesDefaultMaskCalculated() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce", READ_WRITE),
      aclEntry(DEFAULT, USER, "diana", READ_EXECUTE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
      .add(aclEntry(DEFAULT, USER, "diana", READ_EXECUTE))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Filtering the unnamed default user entry must regenerate it from the
 * access-scope owner entry rather than drop it.
 */
@Test
public void testFilterAclEntriesByAclSpecAutomaticDefaultUser()
    throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, READ_WRITE))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, READ))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
EqualityVerifier
/**
 * Replacing with a spec identical to the existing ACL must return an equal
 * list (no spurious changes).
 */
@Test
public void testReplaceAclEntriesUnchanged() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", ALL))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "sales", ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", ALL))
      .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
      .add(aclEntry(DEFAULT, GROUP, "sales", ALL))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "sales", ALL),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, "sales", ALL),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE));
  assertEquals(existing, replaceAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Merging several new named-user entries that sort before an existing one
 * must insert them in order and update the existing entry in place.
 */
@Test
public void testMergeAclEntriesMultipleNewBeforeExisting()
    throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "diana", READ))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, MASK, READ_EXECUTE))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, USER, "clark", READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE))
      .add(aclEntry(ACCESS, USER, "clark", READ_EXECUTE))
      .add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, MASK, READ_EXECUTE))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Filtering entries that do not exist in the ACL must leave the ACL equal to
 * its original form.
 */
@Test
public void testFilterAclEntriesByAclSpecUnchanged() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", ALL))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "sales", ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "clark"),
      aclEntry(ACCESS, GROUP, "execs"));
  assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec));
}
EqualityVerifier
/**
 * Replacing with default entries that omit a default "other" entry must
 * auto-fill it from the access-scope other entry.
 */
@Test
public void testReplaceAclEntriesAutomaticDefaultOther() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, READ_WRITE),
      aclEntry(DEFAULT, USER, "bruce", READ),
      aclEntry(DEFAULT, GROUP, READ_WRITE),
      aclEntry(DEFAULT, MASK, READ_WRITE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, READ_WRITE))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, GROUP, READ_WRITE))
      .add(aclEntry(DEFAULT, MASK, READ_WRITE))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
EqualityVerifier
/**
 * Merging an empty spec must be a no-op, returning the existing ACL.
 */
@Test
public void testMergeAclEntriesEmptyAclSpec() throws AclException {
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, READ))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList();
  assertEquals(existing, mergeAclEntries(existing, aclSpec));
}
EqualityVerifier
/** Merging a spec identical to the existing ACL must leave it unchanged. */
@Test public void testMergeAclEntriesUnchanged() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",ALL))
    .add(aclEntry(ACCESS,GROUP,READ_EXECUTE))
    .add(aclEntry(ACCESS,GROUP,"sales",ALL))
    .add(aclEntry(ACCESS,MASK,ALL))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",ALL))
    .add(aclEntry(DEFAULT,GROUP,READ_EXECUTE))
    .add(aclEntry(DEFAULT,GROUP,"sales",ALL))
    .add(aclEntry(DEFAULT,MASK,ALL))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  // Spec repeats every existing entry verbatim.
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(ACCESS,USER,ALL),
    aclEntry(ACCESS,USER,"bruce",ALL),
    aclEntry(ACCESS,GROUP,READ_EXECUTE),
    aclEntry(ACCESS,GROUP,"sales",ALL),
    aclEntry(ACCESS,MASK,ALL),
    aclEntry(ACCESS,OTHER,NONE),
    aclEntry(DEFAULT,USER,ALL),
    aclEntry(DEFAULT,USER,"bruce",ALL),
    aclEntry(DEFAULT,GROUP,READ_EXECUTE),
    aclEntry(DEFAULT,GROUP,"sales",ALL),
    aclEntry(DEFAULT,MASK,ALL),
    aclEntry(DEFAULT,OTHER,NONE));
  assertEquals(existing,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** filterDefaultAclEntries must drop every DEFAULT-scope entry and keep ACCESS entries. */
@Test public void testFilterDefaultAclEntries() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ_EXECUTE))
    .add(aclEntry(ACCESS,GROUP,"sales",READ_EXECUTE))
    .add(aclEntry(ACCESS,MASK,ALL))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,GROUP,"sales",READ_EXECUTE))
    .add(aclEntry(DEFAULT,MASK,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,READ_EXECUTE))
    .build();
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ_EXECUTE))
    .add(aclEntry(ACCESS,GROUP,"sales",READ_EXECUTE))
    .add(aclEntry(ACCESS,MASK,ALL))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  assertEquals(expected,filterDefaultAclEntries(existing));
}
EqualityVerifier
/** Merging a new named-user entry must add it and synthesize the access mask. */
@Test public void testMergeAclEntries() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ_EXECUTE))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bruce",ALL));
  // Mask is calculated from the union of group-class entries.
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",ALL))
    .add(aclEntry(ACCESS,GROUP,READ_EXECUTE))
    .add(aclEntry(ACCESS,MASK,ALL))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  assertEquals(expected,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Removing a DEFAULT entry must recalculate only the default mask; the access mask is preserved. */
@Test public void testFilterAclEntriesByAclSpecAccessMaskPreserved() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,USER,"diana",READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"diana"));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec));
}
EqualityVerifier
/** A default mask supplied in the spec must be honored rather than recalculated. */
@Test public void testMergeAclEntriesProvidedDefaultMask() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(DEFAULT,USER,ALL),
    aclEntry(DEFAULT,GROUP,READ),
    aclEntry(DEFAULT,MASK,ALL),
    aclEntry(DEFAULT,OTHER,NONE));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,ALL))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Replacing only DEFAULT entries must recalculate the default mask and preserve the access mask. */
@Test public void testReplaceAclEntriesAccessMaskPreserved() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,USER,"diana",READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(DEFAULT,USER,ALL),
    aclEntry(DEFAULT,USER,"bruce",READ),
    aclEntry(DEFAULT,GROUP,READ),
    aclEntry(DEFAULT,OTHER,NONE));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,replaceAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Merging a DEFAULT entry must recalculate only the default mask and preserve the access mask. */
@Test public void testMergeAclEntriesAccessMaskPreserved() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,USER,"diana",READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"diana",READ_EXECUTE));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,USER,"diana",READ_EXECUTE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ_EXECUTE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Removing an ACCESS entry must recalculate only the access mask; the default mask is preserved. */
@Test public void testFilterAclEntriesByAclSpecDefaultMaskPreserved() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ_WRITE))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"diana",ALL))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"diana"));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"diana",ALL))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec));
}
EqualityVerifier
/** Merging named-user entries must recalculate the access mask from the new group-class union. */
@Test public void testMergeAclEntriesAccessMaskCalculated() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),
    aclEntry(ACCESS,USER,"diana",READ));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ_EXECUTE))
    .add(aclEntry(ACCESS,USER,"diana",READ))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ_EXECUTE))
    .add(aclEntry(ACCESS,OTHER,READ))
    .build();
  assertEquals(expected,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Removing DEFAULT,OTHER must regenerate it from the access OTHER entry. */
@Test public void testFilterAclEntriesByAclSpecAutomaticDefaultOther() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(aclEntry(DEFAULT,OTHER));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,READ))
    .build();
  assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec));
}
EqualityVerifier
/** Replacing with defaults but no DEFAULT,USER entry must synthesize one from the access owner entry. */
@Test public void testReplaceAclEntriesAutomaticDefaultUser() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  // Spec omits the unnamed DEFAULT,USER entry on purpose.
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(ACCESS,USER,ALL),
    aclEntry(ACCESS,GROUP,READ),
    aclEntry(ACCESS,OTHER,NONE),
    aclEntry(DEFAULT,USER,"bruce",READ),
    aclEntry(DEFAULT,GROUP,READ_WRITE),
    aclEntry(DEFAULT,MASK,READ_WRITE),
    aclEntry(DEFAULT,OTHER,READ));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,GROUP,READ_WRITE))
    .add(aclEntry(DEFAULT,MASK,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,READ))
    .build();
  assertEquals(expected,replaceAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Replacing with defaults but no DEFAULT,GROUP entry must synthesize one from the access group entry. */
@Test public void testReplaceAclEntriesAutomaticDefaultGroup() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  // Spec omits the unnamed DEFAULT,GROUP entry on purpose.
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(ACCESS,USER,ALL),
    aclEntry(ACCESS,GROUP,READ),
    aclEntry(ACCESS,OTHER,NONE),
    aclEntry(DEFAULT,USER,READ_WRITE),
    aclEntry(DEFAULT,USER,"bruce",READ),
    aclEntry(DEFAULT,MASK,READ),
    aclEntry(DEFAULT,OTHER,READ));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .add(aclEntry(DEFAULT,USER,READ_WRITE))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,READ))
    .add(aclEntry(DEFAULT,OTHER,READ))
    .build();
  assertEquals(expected,replaceAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Replacing with named users but no mask must calculate the access mask automatically. */
@Test public void testReplaceAclEntriesAccessMaskCalculated() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(ACCESS,USER,ALL),
    aclEntry(ACCESS,USER,"bruce",READ),
    aclEntry(ACCESS,USER,"diana",READ_WRITE),
    aclEntry(ACCESS,GROUP,READ),
    aclEntry(ACCESS,OTHER,READ));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ))
    .add(aclEntry(ACCESS,USER,"diana",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,READ_WRITE))
    .add(aclEntry(ACCESS,OTHER,READ))
    .build();
  assertEquals(expected,replaceAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Merging defaults without a DEFAULT,USER entry must synthesize one from the access owner entry. */
@Test public void testMergeAclEntriesAutomaticDefaultUser() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(DEFAULT,GROUP,READ_EXECUTE),
    aclEntry(DEFAULT,OTHER,READ));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,GROUP,READ_EXECUTE))
    .add(aclEntry(DEFAULT,OTHER,READ))
    .build();
  assertEquals(expected,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Removing DEFAULT,GROUP must regenerate it from the access GROUP entry. */
@Test public void testFilterAclEntriesByAclSpecAutomaticDefaultGroup() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ_WRITE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(aclEntry(DEFAULT,GROUP));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec));
}
EqualityVerifier
/** Merging defaults without a DEFAULT,OTHER entry must synthesize one from the access OTHER entry. */
@Test public void testMergeAclEntriesAutomaticDefaultOther() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(DEFAULT,USER,READ_EXECUTE),
    aclEntry(DEFAULT,GROUP,READ_EXECUTE));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .add(aclEntry(DEFAULT,USER,READ_EXECUTE))
    .add(aclEntry(DEFAULT,GROUP,READ_EXECUTE))
    .add(aclEntry(DEFAULT,OTHER,NONE))
    .build();
  assertEquals(expected,mergeAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Replacing with default named users but no default mask must calculate one automatically. */
@Test public void testReplaceAclEntriesDefaultMaskCalculated() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList(
    aclEntry(ACCESS,USER,ALL),
    aclEntry(ACCESS,GROUP,READ),
    aclEntry(ACCESS,OTHER,READ),
    aclEntry(DEFAULT,USER,ALL),
    aclEntry(DEFAULT,USER,"bruce",READ),
    aclEntry(DEFAULT,USER,"diana",READ_WRITE),
    aclEntry(DEFAULT,GROUP,ALL),
    aclEntry(DEFAULT,OTHER,READ));
  List<AclEntry> expected=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ))
    .add(aclEntry(DEFAULT,USER,"diana",READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,ALL))
    .add(aclEntry(DEFAULT,MASK,ALL))
    .add(aclEntry(DEFAULT,OTHER,READ))
    .build();
  assertEquals(expected,replaceAclEntries(existing,aclSpec));
}
EqualityVerifier
/** Filtering with an empty spec must be a no-op. */
@Test public void testFilterAclEntriesByAclSpecEmptyAclSpec() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",READ_WRITE))
    .add(aclEntry(ACCESS,GROUP,READ))
    .add(aclEntry(ACCESS,MASK,ALL))
    .add(aclEntry(ACCESS,OTHER,READ))
    .add(aclEntry(DEFAULT,USER,ALL))
    .add(aclEntry(DEFAULT,USER,"bruce",READ_WRITE))
    .add(aclEntry(DEFAULT,GROUP,READ))
    .add(aclEntry(DEFAULT,MASK,ALL))
    .add(aclEntry(DEFAULT,OTHER,READ))
    .build();
  List<AclEntry> aclSpec=Lists.newArrayList();
  assertEquals(existing,filterAclEntriesByAclSpec(existing,aclSpec));
}
EqualityVerifier
/** An ACL with no DEFAULT entries must pass through filterDefaultAclEntries unchanged. */
@Test public void testFilterDefaultAclEntriesUnchanged() throws AclException {
  List<AclEntry> existing=new ImmutableList.Builder<AclEntry>()
    .add(aclEntry(ACCESS,USER,ALL))
    .add(aclEntry(ACCESS,USER,"bruce",ALL))
    .add(aclEntry(ACCESS,GROUP,READ_EXECUTE))
    .add(aclEntry(ACCESS,GROUP,"sales",ALL))
    .add(aclEntry(ACCESS,MASK,ALL))
    .add(aclEntry(ACCESS,OTHER,NONE))
    .build();
  assertEquals(existing,filterDefaultAclEntries(existing));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * A retried addBlock() after a NameNode restart must return the same block,
 * and that block must still carry datanode locations.
 */
@Test public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
  final String src="/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc=cluster.getNameNodeRpc();
  nameNodeRpc.create(src,FsPermission.getFileDefault(),"clientName",new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null);
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
  assertTrue("Block locations should be present",lb1.getLocations().length > 0);
  // Restart so the retried call goes through a fresh RPC server against the
  // same namespace state.
  cluster.restartNameNode();
  nameNodeRpc=cluster.getNameNodeRpc();
  LocatedBlock lb2=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
  assertEquals("Blocks are not equal",lb1.getBlock(),lb2.getBlock());
  assertTrue("Wrong locations with retry",lb2.getLocations().length > 0);
}
BranchVerifierInternalCallVerifierEqualityVerifier
/**
* Retry addBlock() while another thread is in chooseTarget().
* See HDFS-4452.
*/
@Test public void testRetryAddBlockWhileInChooseTarget() throws Exception {
final String src="/testRetryAddBlockWhileInChooseTarget";
FSNamesystem ns=cluster.getNamesystem();
BlockManager spyBM=spy(ns.getBlockManager());
final NamenodeProtocols nn=cluster.getNameNodeRpc();
Class extends FSNamesystem> nsClass=ns.getClass();
Field bmField=nsClass.getDeclaredField("blockManager");
bmField.setAccessible(true);
bmField.set(ns,spyBM);
doAnswer(new Answer(){
@Override public DatanodeStorageInfo[] answer( InvocationOnMock invocation) throws Throwable {
LOG.info("chooseTarget for " + src);
DatanodeStorageInfo[] ret=(DatanodeStorageInfo[])invocation.callRealMethod();
count++;
if (count == 1) {
LOG.info("Starting second addBlock for " + src);
nn.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
LocatedBlocks lbs=nn.getBlockLocations(src,0,Long.MAX_VALUE);
assertEquals("Must be one block",1,lbs.getLocatedBlocks().size());
lb2=lbs.get(0);
assertEquals("Wrong replication",REPLICATION,lb2.getLocations().length);
}
return ret;
}
}
).when(spyBM).chooseTarget(Mockito.anyString(),Mockito.anyInt(),Mockito.any(),Mockito.>any(),Mockito.anyLong(),Mockito.>any());
nn.create(src,FsPermission.getFileDefault(),"clientName",new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null);
LOG.info("Starting first addBlock for " + src);
nn.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
LocatedBlocks lbs=nn.getBlockLocations(src,0,Long.MAX_VALUE);
assertEquals("Must be one block",1,lbs.getLocatedBlocks().size());
lb1=lbs.get(0);
assertEquals("Wrong replication",REPLICATION,lb1.getLocations().length);
assertEquals("Blocks are not equal",lb1.getBlock(),lb2.getBlock());
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Regression check for HADOOP-9155: the stat passed to logAuditEvent()
 * during a FileSystem.setPermission() call must carry the newly-set
 * permission, not the permission from before the update.
 */
@Test public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration config=new HdfsConfiguration();
  config.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
  MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(config).build();
  try {
    miniCluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    FileSystem fileSystem=miniCluster.getFileSystem();
    long now=System.currentTimeMillis();
    final Path rootPath=new Path("/");
    fileSystem.setTimes(rootPath,now,now);
    fileSystem.setPermission(rootPath,new FsPermission(TEST_PERMISSION));
    // The audit logger must have observed the fresh permission value ...
    assertEquals(TEST_PERMISSION,DummyAuditLogger.foundPermission);
    // ... and exactly the two operations above must have been audited.
    assertEquals(2,DummyAuditLogger.logCount);
  }
  finally {
    miniCluster.shutdown();
  }
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * A configured custom AuditLogger must be initialized with the cluster and
 * must receive one audit event per audited namesystem operation.
 */
@Test public void testAuditLogger() throws IOException {
  Configuration config=new HdfsConfiguration();
  config.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
  MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(config).build();
  try {
    miniCluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    FileSystem fileSystem=miniCluster.getFileSystem();
    long now=System.currentTimeMillis();
    fileSystem.setTimes(new Path("/"),now,now);
    // Exactly one audited operation was performed.
    assertEquals(1,DummyAuditLogger.logCount);
  }
  finally {
    miniCluster.shutdown();
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files: while the
 * file is open, the last reported block must be under construction.
 */
@Test public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode=cluster.getNameNodeRpc();
  final Path p=new Path(BASE_DIR,"file2.dat");
  final String src=p.toString();
  final FSDataOutputStream out=TestFileCreation.createFile(hdfs,p,3);
  // Write half a block so the first block stays incomplete.
  int len=BLOCK_SIZE >>> 1;
  writeFile(p,out,len);
  for (int i=1; i < NUM_BLOCKS; ) {
    final LocatedBlocks lb=namenode.getBlockLocations(src,0,len);
    final List<LocatedBlock> blocks=lb.getLocatedBlocks();
    assertEquals(i,blocks.size());
    // The last block of an open file must be under construction.
    final Block b=blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // Grow the file by one full block and re-check on the next iteration.
      writeFile(p,out,BLOCK_SIZE);
      len+=BLOCK_SIZE;
    }
  }
  out.close();
}
InternalCallVerifierEqualityVerifier
/**
 * Cache two files, verify the datanode-reported cache statistics add up,
 * then remove the directives and verify the cache drains.
 */
@Test(timeout=120000) public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper=new FileSystemTestHelper();
  // Wait until all datanodes have registered their cache capacity and
  // nothing is cached yet.
  GenericTestUtils.waitFor(new Supplier<Boolean>(){
    @Override public Boolean get(){
      return ((namenode.getNamesystem().getCacheCapacity() == (NUM_DATANODES * CACHE_CAPACITY)) && (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }
  ,500,60000);
  NamenodeProtocols nnRpc=namenode.getRpcServer();
  DataNode dn0=cluster.getDataNodes().get(0);
  String bpid=cluster.getNamesystem().getBlockPoolId();
  // A cache report containing an unknown block id must be tolerated.
  LinkedList<Long> bogusBlockIds=new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid),bpid,bogusBlockIds);
  Path rootDir=helper.getDefaultWorkingDirectory(dfs);
  final String pool="friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo(pool));
  final int numFiles=2;
  final int numBlocksPerFile=2;
  final List<String> paths=new ArrayList<String>(numFiles);
  for (int i=0; i < numFiles; i++) {
    Path p=new Path(rootDir,"testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs,p,numBlocksPerFile,(int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  waitForCachedBlocks(namenode,0,0,"testWaitForCachedReplicas:0");
  // Cache each file in turn and wait for its blocks to be reported cached.
  int expected=0;
  for (int i=0; i < numFiles; i++) {
    CacheDirectiveInfo directive=new CacheDirectiveInfo.Builder().setPath(new Path(paths.get(i))).setPool(pool).build();
    nnRpc.addCacheDirective(directive,EnumSet.noneOf(CacheFlag.class));
    expected+=numBlocksPerFile;
    waitForCachedBlocks(namenode,expected,expected,"testWaitForCachedReplicas:1");
  }
  // Cross-check per-datanode cache statistics for internal consistency.
  DatanodeInfo[] live=dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes",NUM_DATANODES,live.length);
  long totalUsed=0;
  for ( DatanodeInfo dn : live) {
    final long cacheCapacity=dn.getCacheCapacity();
    final long cacheUsed=dn.getCacheUsed();
    final long cacheRemaining=dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity",CACHE_CAPACITY,cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",cacheCapacity,cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",cacheCapacity - cacheUsed,cacheRemaining);
    totalUsed+=cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE,totalUsed);
  // Remove the directives one at a time and wait for the cache to drain.
  RemoteIterator<CacheDirectiveEntry> entries=new CacheDirectiveIterator(nnRpc,null);
  for (int i=0; i < numFiles; i++) {
    CacheDirectiveEntry entry=entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected-=numBlocksPerFile;
    waitForCachedBlocks(namenode,expected,expected,"testWaitForCachedReplicas:2");
  }
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Create a cache pool, verify its listed attributes, modify every mutable
 * attribute, then remove it and verify double-removal fails.
 */
@Test(timeout=60000) public void testCreateAndModifyPools() throws Exception {
  String poolName="pool1";
  String ownerName="abc";
  String groupName="123";
  FsPermission mode=new FsPermission((short)0755);
  long limit=150;
  dfs.addCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
  RemoteIterator<CachePoolEntry> iter=dfs.listCachePools();
  CachePoolInfo info=iter.next().getInfo();
  assertEquals(poolName,info.getPoolName());
  assertEquals(ownerName,info.getOwnerName());
  assertEquals(groupName,info.getGroupName());
  // Modify every mutable attribute and verify the listing reflects them.
  ownerName="def";
  groupName="456";
  mode=new FsPermission((short)0700);
  limit=151;
  dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
  iter=dfs.listCachePools();
  info=iter.next().getInfo();
  assertEquals(poolName,info.getPoolName());
  assertEquals(ownerName,info.getOwnerName());
  assertEquals(groupName,info.getGroupName());
  assertEquals(mode,info.getMode());
  assertEquals(limit,(long)info.getLimit());
  dfs.removeCachePool(poolName);
  iter=dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool",iter.hasNext());
  proto.listCachePools(null);
  // Removing a pool that never existed must fail ...
  try {
    proto.removeCachePool("pool99");
    fail("expected to get an exception when " + "removing a non-existent pool.");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe);
  }
  // ... and so must removing the already-removed pool.
  try {
    proto.removeCachePool(poolName);
    fail("expected to get an exception when " + "removing a non-existent pool.");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe);
  }
  iter=dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool",iter.hasNext());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercise the pool-level maximum relative expiration: invalid pool values
 * are rejected, directives may not exceed the pool's max (on add, modify,
 * or move between pools), and a "never" max permits long expirations.
 */
@Test(timeout=30000) public void testMaxRelativeExpiry() throws Exception {
  // Pool-level validation: negative and overflowing max expiry are rejected.
  try {
    dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("negative",e);
  }
  try {
    dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
    fail("Added a pool with too big of a max expiry.");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("too big",e);
  }
  // A pool with a sane max expiry (10 minutes).
  CachePoolInfo coolPool=new CachePoolInfo("coolPool");
  final long poolExpiration=1000 * 60 * 10l;
  dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
  RemoteIterator<CachePoolEntry> poolIt=dfs.listCachePools();
  CachePoolInfo listPool=poolIt.next().getInfo();
  assertFalse("Should only be one pool",poolIt.hasNext());
  assertEquals("Expected max relative expiry to match set value",poolExpiration,listPool.getMaxRelativeExpiryMs().longValue());
  // The same validation applies on modify.
  try {
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("negative",e);
  }
  try {
    dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
    fail("Added a pool with too big of a max expiry.");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("too big",e);
  }
  // A directive with no explicit expiration defaults to the pool's max.
  CacheDirectiveInfo defaultExpiry=new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
  dfs.addCacheDirective(defaultExpiry);
  RemoteIterator<CacheDirectiveEntry> dirIt=dfs.listCacheDirectives(defaultExpiry);
  CacheDirectiveInfo listInfo=dirIt.next().getInfo();
  assertFalse("Should only have one entry in listing",dirIt.hasNext());
  long listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
  assertTrue("Directive expiry should be approximately the pool's max expiry",Math.abs(listExpiration - poolExpiration) < 10 * 1000);
  // Directives exceeding the pool max are rejected on add and modify,
  // whether specified relatively or absolutely.
  CacheDirectiveInfo.Builder builder=new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
  try {
    dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
    fail("Added a directive that exceeds pool's max relative expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration",e);
  }
  try {
    dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
    fail("Added a directive that exceeds pool's max relative expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration",e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration",e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration",e);
  }
  // Absurdly large expirations are rejected outright (relative values fail
  // client-side with IllegalArgumentException, absolute ones server-side).
  try {
    dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
    fail("Added a directive with a gigantic max value");
  }
  catch ( IllegalArgumentException e) {
    assertExceptionContains("is too far in the future",e);
  }
  try {
    dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
    fail("Added a directive with a gigantic max value");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("is too far in the future",e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration",e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
    fail("Modified a directive to exceed pool's max relative expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("is too far in the future",e);
  }
  // Moving a directive into a pool with a smaller max must also be rejected
  // unless the expiration is lowered at the same time.
  CachePoolInfo destPool=new CachePoolInfo("destPool");
  dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
    fail("Modified a directive to a pool with a lower max expiration");
  }
  catch ( InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration",e);
  }
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
  dirIt=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
  listInfo=dirIt.next().getInfo();
  listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
  assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately "+ poolExpiration / 2,Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
  // With the pool max set to "never", a RELATIVE_EXPIRY_NEVER directive is
  // accepted, as is anything shorter.
  dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
  poolIt=dfs.listCachePools();
  listPool=poolIt.next().getInfo();
  while (!listPool.getPoolName().equals(destPool.getPoolName())) {
    listPool=poolIt.next().getInfo();
  }
  assertEquals("Expected max relative expiry to match set value",CachePoolInfo.RELATIVE_EXPIRY_NEVER,listPool.getMaxRelativeExpiryMs().longValue());
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build());
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests cache pool limit enforcement: negative limits are rejected, adding
 * or moving a directive past a pool's remaining capacity fails unless
 * CacheFlag.FORCE is given, and overlimit statistics are reported.
 */
@Test(timeout=120000) public void testLimit() throws Exception {
  // A negative limit is invalid and must be rejected up front.
  try {
    dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99L));
    fail("Should not be able to set a negative limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  final String destiny = "poolofdestiny";
  final Path path1 = new Path("/destiny");
  DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
  // Pool limit is one byte short of what the file needs.
  final CachePoolInfo poolInfo =
      new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
  dfs.addCachePool(poolInfo);
  final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
      .setPool(destiny).setPath(path1).build();
  try {
    dfs.addCacheDirective(info1);
    fail("Should not be able to cache when there is no more limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Raise the limit so the directive just fits, then add it.
  poolInfo.setLimit(2 * BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  long id1 = dfs.addCacheDirective(info1);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo,
      "testLimit:1");
  final Path path2 = new Path("/failure");
  DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
  try {
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPool(destiny).setPath(path2).build(),
        EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to add another cached file");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Lower the limit below what is already cached; the pool goes overlimit.
  poolInfo.setLimit(BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo,
      "testLimit:2");
  RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
  assertTrue("Expected a cache pool", it.hasNext());
  CachePoolStats stats = it.next().getStats();
  assertEquals("Overlimit bytes should be difference of needed and limit",
      BLOCK_SIZE, stats.getBytesOverlimit());
  CachePoolInfo inadequate =
      new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
  dfs.addCachePool(inadequate);
  // Moving the directive into a pool without capacity must fail when not
  // forced. (Original version had no fail() here, so a missing exception
  // went undetected.)
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
        .setId(id1).setPool(inadequate.getPoolName()).build(),
        EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to move directive to a pool without capacity");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // With FORCE the capacity check is skipped for both modify and add.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
      .setId(id1).setPool(inadequate.getPoolName()).build(),
      EnumSet.of(CacheFlag.FORCE));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPool(inadequate.getPoolName()).setPath(path1).build(),
      EnumSet.of(CacheFlag.FORCE));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that cache pools and directives survive a checkpoint and a
 * NameNode restart, and that directive IDs keep increasing afterwards.
 */
@Test(timeout=60000) public void testCacheManagerRestart() throws Exception {
  SecondaryNameNode secondary = null;
  try {
    // Start a secondary namenode so the cache state can be checkpointed.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    secondary = new SecondaryNameNode(conf);
    // Create a pool with non-default group, mode and limit, and verify it
    // lists back exactly once with those attributes.
    final String pool = "poolparty";
    String groupName = "partygroup";
    FsPermission mode = new FsPermission((short) 0777);
    long limit = 747;
    dfs.addCachePool(new CachePoolInfo(pool)
        .setGroupName(groupName).setMode(mode).setLimit(limit));
    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    CachePoolInfo info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    // Add several directives sharing one absolute expiration time.
    int numEntries = 10;
    String entryPrefix = "/party-";
    long prevId = -1;
    final Date expiry = new Date();
    for (int i = 0; i < numEntries; i++) {
      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
          .setPath(new Path(entryPrefix + i)).setPool(pool)
          .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime()))
          .build());
    }
    RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());
    // Checkpoint, then add state that only exists relative to the new
    // image, save the namespace, and checkpoint again so the secondary
    // must fetch a fresh fsimage.
    secondary.doCheckpoint();
    final String imagePool = "imagePool";
    dfs.addCachePool(new CachePoolInfo(imagePool));
    prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/image")).setPool(imagePool).build());
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    boolean fetchImage = secondary.doCheckpoint();
    assertTrue("Secondary should have fetched a new fsimage from NameNode",
        fetchImage);
    dfs.removeCachePool(imagePool);
    // Restart the NN and confirm all pool/directive state was reloaded.
    cluster.restartNameNode();
    pit = dfs.listCachePools();
    assertTrue("No cache pools found", pit.hasNext());
    info = pit.next().getInfo();
    assertEquals(pool, info.getPoolName());
    assertEquals(groupName, info.getGroupName());
    assertEquals(mode, info.getMode());
    assertEquals(limit, (long) info.getLimit());
    assertFalse("Unexpected # of cache pools found", pit.hasNext());
    dit = dfs.listCacheDirectives(null);
    for (int i = 0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
      CacheDirectiveInfo cd = dit.next().getInfo();
      assertEquals(i + 1, cd.getId().longValue());
      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
      assertEquals(pool, cd.getPool());
      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
    }
    assertFalse("Unexpected # of cache directives found", dit.hasNext());
    // Directive IDs must continue monotonically after the restart.
    long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/foobar")).setPool(pool).build());
    assertEquals(prevId + 1, nextId);
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises the add/list/modify/remove lifecycle of cache directives,
 * including error cases: unknown pool, permission-denied pool, malformed
 * path, empty pool name, and invalid removal IDs.
 */
@Test(timeout=60000) public void testAddRemoveDirectives() throws Exception {
  // Three world-accessible pools plus one pool nobody may use (mode 0).
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short) 0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/delta")).setPool("pool1").build();
  // Re-adding an identical directive must yield a new, distinct ID.
  long alphaId = addAsUnprivileged(alpha);
  long alphaId2 = addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an "
      + "existing CacheDirectiveInfo", alphaId == alphaId2);
  long betaId = addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  } catch (InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with "
        + "mode 0 (no permissions for anyone).");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path "
        + "to the cache directives.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/emptypoolname")).setReplication((short) 1).setPool("").build());
    fail("expected an error when adding a cache "
        + "directive with an empty pool name.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
  }
  long deltaId = addAsUnprivileged(delta);
  // Relative paths are legal and resolved against the working directory.
  long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder()
      .setPath(new Path("relative")).setPool("pool1").build());
  // Verify listing with no filter, with pool filters, and with ID filters.
  RemoteIterator<CacheDirectiveEntry> iter;
  iter = dfs.listCacheDirectives(null);
  validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter, betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter, alphaId2);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter, relativeId);
  dfs.removeCacheDirective(betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  // Removal error cases: already-removed, negative, and unknown IDs.
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  try {
    proto.removeCacheDirective(-42L);
    fail("expected an error when removing a negative ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
  }
  try {
    proto.removeCacheDirective(43L);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify the surviving directive and confirm the change is visible.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(relativeId).setReplication((short) 555).build());
  iter = dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified = iter.next().getInfo();
  assertEquals(relativeId, modified.getId().longValue());
  assertEquals((short) 555, modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter = dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // "." (the working directory) is a legal directive path as well.
  CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
      .setPath(new Path(".")).setPool("pool1").build();
  long id = dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive)
      .setId(id).setReplication((short) 2).build());
  dfs.removeCacheDirective(id);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that listCachePools hides owner/group/mode/limit from a user who
 * lacks read access to the pool, and reveals them once that user becomes
 * the pool's owner.
 */
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser = UserGroupInformation.createRemoteUser("myuser");
  final DistributedFileSystem myDfs =
      (DistributedFileSystem) DFSTestUtil.getFileSystemAs(myUser, conf);
  final String poolName = "poolparty";
  // Mode 0700: only the pool's owner (the superuser) may read metadata.
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setMode(new FsPermission((short) 0700)));
  RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
  CachePoolInfo info = it.next().getInfo();
  assertFalse(it.hasNext());
  // The unprivileged user sees only the pool name, no other metadata.
  assertEquals("Expected pool name", poolName, info.getPoolName());
  assertNull("Unexpected owner name", info.getOwnerName());
  assertNull("Unexpected group name", info.getGroupName());
  assertNull("Unexpected mode", info.getMode());
  assertNull("Unexpected limit", info.getLimit());
  // Make myuser the owner; the metadata should now be visible to it.
  final long limit = 99;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(myUser.getShortUserName()).setLimit(limit));
  it = myDfs.listCachePools();
  info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  assertEquals("Mismatched owner name", myUser.getShortUserName(), info.getOwnerName());
  assertNotNull("Expected group name", info.getGroupName());
  assertEquals("Mismatched mode", (short) 0700, info.getMode().toShort());
  assertEquals("Mismatched limit", limit, (long) info.getLimit());
}
InternalCallVerifierEqualityVerifier
/**
 * Test case where two secondary namenodes are checkpointing the same
 * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()} since
 * that test runs against two distinct NNs.
 * This case tests the following interleaving:
 * - 2NN A) calls rollEdits()
 * - 2NN B) calls rollEdits()
 * - 2NN A) paused at getRemoteEditLogManifest()
 * - 2NN B) calls getRemoteEditLogManifest() (returns up to txid 4)
 * - 2NN B) uploads checkpoint fsimage_4
 * - 2NN A) allowed to proceed, also returns up to txid 4
 * - 2NN A) uploads checkpoint fsimage_4 as well, should fail gracefully
 * It verifies that one of the two gets an error that it's uploading a
 * duplicate checkpoint, and the other one succeeds.
 */
@Test public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
SecondaryNameNode secondary1=null, secondary2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
secondary1=startSecondaryNameNode(conf,1);
secondary2=startSecondaryNameNode(conf,2);
// Wrap secondary1's NamenodeProtocol in a Mockito mock whose
// getEditLogManifest() call can be paused via DelayAnswer; every other
// call passes straight through to the real NN proxy via the delegator.
final NamenodeProtocol origNN=secondary1.getNameNode();
final Answer delegator=new GenericTestUtils.DelegateAnswer(origNN);
NamenodeProtocol spyNN=Mockito.mock(NamenodeProtocol.class,delegator);
DelayAnswer delayer=new DelayAnswer(LOG){
@Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
return delegator.answer(invocation);
}
}
;
secondary1.setNameNode(spyNN);
Mockito.doAnswer(delayer).when(spyNN).getEditLogManifest(Mockito.anyLong());
// Start 2NN A's checkpoint in the background and wait until it is
// parked inside getEditLogManifest().
DoCheckpointThread checkpointThread=new DoCheckpointThread(secondary1);
checkpointThread.start();
delayer.waitForCall();
// While A is paused, 2NN B completes a full checkpoint (txid 4).
secondary2.doCheckpoint();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
assertEquals(4,storage.getMostRecentCheckpointTxId());
// Unblock A; it tries to upload the same fsimage_4 and must fail
// gracefully, leaving the NN's checkpoint state untouched.
delayer.proceed();
checkpointThread.join();
checkpointThread.propagateExceptions();
assertEquals(4,storage.getMostRecentCheckpointTxId());
// Subsequent checkpoints from both secondaries must still succeed.
secondary2.doCheckpoint();
assertEquals(6,storage.getMostRecentCheckpointTxId());
assertNNHasCheckpoints(cluster,ImmutableList.of(4,6));
// Restore the real (un-mocked) NN proxy before checkpointing from A.
secondary1.setNameNode(origNN);
secondary1.doCheckpoint();
assertEquals(8,storage.getMostRecentCheckpointTxId());
assertParallelFilesInvariant(cluster,ImmutableList.of(secondary1,secondary2));
assertNNHasCheckpoints(cluster,ImmutableList.of(6,8));
}
finally {
cleanup(secondary1);
secondary1=null;
cleanup(secondary2);
secondary2=null;
cleanup(cluster);
cluster=null;
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Checks that the 2NN command-line parser accepts each legal option form
 * and rejects conflicting or malformed argument combinations.
 */
@Test public void testCommandLineParsing() throws ParseException {
  SecondaryNameNode.CommandLineOpts cliOpts = new SecondaryNameNode.CommandLineOpts();
  // No arguments: no command selected.
  cliOpts.parse();
  assertNull(cliOpts.getCommand());
  // Plain checkpoint, not forced.
  cliOpts.parse("-checkpoint");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, cliOpts.getCommand());
  assertFalse(cliOpts.shouldForceCheckpoint());
  // Forced checkpoint.
  cliOpts.parse("-checkpoint", "force");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, cliOpts.getCommand());
  assertTrue(cliOpts.shouldForceCheckpoint());
  cliOpts.parse("-geteditsize");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE, cliOpts.getCommand());
  cliOpts.parse("-format");
  assertTrue(cliOpts.shouldFormat());
  // Two mutually exclusive actions must be rejected.
  try {
    cliOpts.parse("-geteditsize", "-checkpoint");
    fail("Should have failed bad parsing for two actions");
  } catch (ParseException e) {
    LOG.warn("Encountered ", e);
  }
  // An unrecognized argument to -checkpoint must be rejected.
  try {
    cliOpts.parse("-checkpoint", "xx");
    fail("Should have failed for bad checkpoint arg");
  } catch (ParseException e) {
    LOG.warn("Encountered ", e);
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
File currentDir=null;
Configuration conf=new HdfsConfiguration();
File base_dir=new File(MiniDFSCluster.getBaseDirectory());
// Let the NN automatically re-adopt failed storage dirs once restored.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,true);
// Configure a separate image-only dir and edits-only dir.
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/name-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/edits-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,fileAsURI(new File(base_dir,"namesecondary1")).toString());
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).manageNameDfsDirs(false).build();
secondary=startSecondaryNameNode(conf);
// Baseline checkpoint while all storage dirs are healthy.
secondary.doCheckpoint();
NamenodeProtocols nn=cluster.getNameNodeRpc();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
// Storage dir 0 is the image-only dir; make it inaccessible (mode 000)
// to simulate the directory failing.
StorageDirectory sd0=storage.getStorageDir(0);
assertEquals(NameNodeDirType.IMAGE,sd0.getStorageDirType());
currentDir=sd0.getCurrentDir();
assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"000"));
// With no usable image dir, the checkpoint upload must fail.
try {
secondary.doCheckpoint();
fail("Did not fail to checkpoint when there are no valid storage dirs");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("No targets in destination storage",ioe);
}
// Restore permissions, ask the NN to take the dir back, and roll the
// edit log so a fresh checkpoint can run end to end.
assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"755"));
nn.restoreFailedStorage("true");
nn.rollEditLog();
secondary.doCheckpoint();
assertNNHasCheckpoints(cluster,ImmutableList.of(8));
assertParallelFilesInvariant(cluster,ImmutableList.of(secondary));
}
finally {
// Always restore permissions so cleanup can delete the directory.
if (currentDir != null) {
FileUtil.chmod(currentDir.getAbsolutePath(),"755");
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the secondary doesn't have to re-download image
 * if it hasn't changed.
 */
@Test public void testSecondaryImageDownload() throws IOException {
LOG.info("Starting testSecondaryImageDownload");
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
Path dir=new Path("/checkpoint");
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
cluster.waitActive();
FileSystem fileSys=cluster.getFileSystem();
FSImage image=cluster.getNameNode().getFSImage();
SecondaryNameNode secondary=null;
try {
assertTrue(!fileSys.exists(dir));
secondary=startSecondaryNameNode(conf);
// Locate where the secondary keeps its checkpoint images on disk.
File secondaryDir=new File(MiniDFSCluster.getBaseDirectory(),"namesecondary1");
File secondaryCurrent=new File(secondaryDir,"current");
// The txid the 2NN is expected to download; the image it then saves
// is two transactions later (the checkpoint rolls the edit log).
long expectedTxIdToDownload=cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId();
File secondaryFsImageBefore=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload));
File secondaryFsImageAfter=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload + 2));
assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists",secondaryFsImageBefore.exists());
// First checkpoint: the image must be downloaded and a new one saved.
assertTrue("Secondary should have loaded an image",secondary.doCheckpoint());
assertTrue("Secondary should have downloaded original image",secondaryFsImageBefore.exists());
assertTrue("Secondary should have created a new image",secondaryFsImageAfter.exists());
long fsimageLength=secondaryFsImageBefore.length();
assertEquals("Image size should not have changed",fsimageLength,secondaryFsImageAfter.length());
// Change the namespace; the next checkpoint should still not need to
// re-download the image it already holds (doCheckpoint returns false).
fileSys.mkdirs(dir);
assertFalse("Another checkpoint should not have to re-load image",secondary.doCheckpoint());
// The newly saved image on the NN reflects the mkdir and is larger.
for ( StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
File imageFile=NNStorage.getImageFile(sd,NameNodeFile.IMAGE,expectedTxIdToDownload + 5);
assertTrue("Image size increased",imageFile.length() > fsimageLength);
}
}
finally {
fileSys.close();
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test that, if the edits dir is separate from the name dir, it is
* properly locked.
*/
@Test public void testSeparateEditsDirLocking() throws IOException {
Configuration conf=new HdfsConfiguration();
File nameDir=new File(MiniDFSCluster.getBaseDirectory(),"name");
File editsDir=new File(MiniDFSCluster.getBaseDirectory(),"testSeparateEditsDirLocking");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsDir.getAbsolutePath());
MiniDFSCluster cluster=null;
StorageDirectory savedSd=null;
try {
cluster=new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false).numDataNodes(0).build();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
for ( StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
assertEquals(editsDir.getAbsoluteFile(),sd.getRoot());
assertLockFails(sd);
savedSd=sd;
}
}
finally {
cleanup(cluster);
cluster=null;
}
assertNotNull(savedSd);
assertClusterStartFailsWhenDirLocked(conf,savedSd);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * @throws IOException
 */
@Test public void testMultipleSecondaryNamenodes() throws IOException {
Configuration conf=new HdfsConfiguration();
String nameserviceId1="ns1";
String nameserviceId2="ns2";
conf.set(DFSConfigKeys.DFS_NAMESERVICES,nameserviceId1 + "," + nameserviceId2);
MiniDFSCluster cluster=null;
SecondaryNameNode secondary1=null;
SecondaryNameNode secondary2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).build();
Configuration snConf1=new HdfsConfiguration(cluster.getConfiguration(0));
Configuration snConf2=new HdfsConfiguration(cluster.getConfiguration(1));
InetSocketAddress nn1RpcAddress=cluster.getNameNode(0).getNameNodeAddress();
InetSocketAddress nn2RpcAddress=cluster.getNameNode(1).getNameNodeAddress();
String nn1=nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
String nn2=nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
// Blank the generic service RPC key and set only the
// nameservice-suffixed keys, so each 2NN resolves exactly one NN.
snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"");
snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId1),nn1);
snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,nameserviceId2),nn2);
secondary1=startSecondaryNameNode(snConf1);
secondary2=startSecondaryNameNode(snConf2);
// Each 2NN should have connected to its own NN's RPC port.
assertEquals(secondary1.getNameNodeAddress().getPort(),nn1RpcAddress.getPort());
assertEquals(secondary2.getNameNodeAddress().getPort(),nn2RpcAddress.getPort());
assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort());
// Both secondaries must be able to checkpoint their respective NNs.
secondary1.doCheckpoint();
secondary2.doCheckpoint();
}
finally {
cleanup(secondary1);
secondary1=null;
cleanup(secondary2);
secondary2=null;
cleanup(cluster);
cluster=null;
}
}
InternalCallVerifierEqualityVerifier
/**
 * Test case where two secondary namenodes are checkpointing the same
 * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}
 * since that test runs against two distinct NNs.
 * This case tests the following interleaving:
 * - 2NN A downloads image (up to txid 2)
 * - 2NN A about to save its own checkpoint
 * - 2NN B downloads image (up to txid 4)
 * - 2NN B uploads checkpoint (txid 4)
 * - 2NN A uploads checkpoint (txid 2)
 * It verifies that this works even though the earlier-txid checkpoint gets
 * uploaded after the later-txid checkpoint.
 */
@Test public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary1 = null, secondary2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    secondary1 = startSecondaryNameNode(conf, 1);
    secondary2 = startSecondaryNameNode(conf, 2);
    // Pause 2NN A right before it saves its own image so that 2NN B can
    // complete a full checkpoint in the meantime.
    CheckpointStorage spyImage1 = spyOnSecondaryImage(secondary1);
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spyImage1)
        .saveFSImageInAllDirs(Mockito.any(), Mockito.anyLong());
    DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1);
    checkpointThread.start();
    delayer.waitForCall();
    // 2NN B checkpoints (txid 4) while A is parked.
    secondary2.doCheckpoint();
    // Let A finish; its older (txid 2) checkpoint is uploaded after B's
    // newer one, which must still be accepted gracefully.
    delayer.proceed();
    checkpointThread.join();
    checkpointThread.propagateExceptions();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    assertEquals(4, storage.getMostRecentCheckpointTxId());
    assertNNHasCheckpoints(cluster, ImmutableList.of(2, 4));
    // A subsequent checkpoint from 2NN B must still work.
    secondary2.doCheckpoint();
    assertEquals(6, storage.getMostRecentCheckpointTxId());
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
    assertNNHasCheckpoints(cluster, ImmutableList.of(4, 6));
  } finally {
    cleanup(secondary1);
    secondary1 = null;
    cleanup(secondary2);
    secondary2 = null;
    // Use the shared cleanup() helper for the cluster as well, consistent
    // with every other checkpoint test in this class (previously this was
    // an ad-hoc null-check + shutdown()).
    cleanup(cluster);
    cluster = null;
  }
}
APIUtilityVerifierEqualityVerifier
/**
* Regression test for HDFS-3678 "Edit log files are never being purged from 2NN"
*/
@Test public void testSecondaryPurgesEditLogs() throws IOException {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,0);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
FileSystem fs=cluster.getFileSystem();
fs.mkdirs(new Path("/foo"));
secondary=startSecondaryNameNode(conf);
for (int i=0; i < 5; i++) {
secondary.doCheckpoint();
}
List checkpointDirs=getCheckpointCurrentDirs(secondary);
for ( File checkpointDir : checkpointDirs) {
List editsFiles=FileJournalManager.matchEditLogs(checkpointDir);
assertEquals("Edit log files were not purged from 2NN",1,editsFiles.size());
}
}
finally {
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Regression test for HDFS-3849. This makes sure that when we re-load the
* FSImage in the 2NN, we clear the existing leases.
*/
@Test public void testSecondaryNameNodeWithSavedLeases() throws IOException {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
FSDataOutputStream fos=null;
Configuration conf=new HdfsConfiguration();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
FileSystem fs=cluster.getFileSystem();
fos=fs.create(new Path("tmpfile"));
fos.write(new byte[]{0,1,2,3});
fos.hflush();
assertEquals(1,cluster.getNamesystem().getLeaseManager().countLease());
secondary=startSecondaryNameNode(conf);
assertEquals(0,secondary.getFSNamesystem().getLeaseManager().countLease());
secondary.doCheckpoint();
assertEquals(1,secondary.getFSNamesystem().getLeaseManager().countLease());
fos.close();
fos=null;
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
secondary.doCheckpoint();
assertEquals(0,secondary.getFSNamesystem().getLeaseManager().countLease());
}
finally {
if (fos != null) {
fos.close();
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifierBranchVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -force -clusterid option when name
* directory exists. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithForceAndClusterId() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String myId="testFormatWithForceAndClusterId";
String[] argv={"-format","-force","-clusterid",myId};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cId=getClusterId(config);
assertEquals("ClusterIds do not match",myId,cId);
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -force options when name directory
* exists. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithForce() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv={"-format","-force"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -force -nonInteractive -force option. Format
* should succeed.
* @throws IOException
*/
@Test public void testFormatWithNonInteractiveAndForce() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv={"-format","-nonInteractive","-force"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
BranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format option when a non empty name directory
* exists. Enter N when prompted and format should be aborted.
* @throws IOException
* @throws InterruptedException
*/
@Test public void testFormatWithoutForceEnterNo() throws IOException, InterruptedException {
File data=new File(hdfsDir,"file");
if (!data.mkdirs()) {
fail("Failed to create dir " + data.getPath());
}
InputStream origIn=System.in;
ByteArrayInputStream bins=new ByteArrayInputStream("N\n".getBytes());
System.setIn(bins);
String[] argv={"-format"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should not have succeeded",1,e.status);
}
System.setIn(origIn);
File version=new File(hdfsDir,"current/VERSION");
assertFalse("Check version should not exist",version.exists());
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format option when an empty name directory
* exists. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithEmptyDir() throws IOException {
if (!hdfsDir.mkdirs()) {
fail("Failed to create dir " + hdfsDir.getPath());
}
String[] argv={"-format"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test namenode format with -format -nonInteractive options when name
* directory does not exist. Format should succeed.
* @throws IOException
*/
@Test public void testFormatWithNonInteractiveNameDirDoesNotExit() throws IOException {
String[] argv={"-format","-nonInteractive"};
try {
NameNode.createNameNode(argv,config);
fail("createNameNode() did not call System.exit()");
}
catch ( ExitException e) {
assertEquals("Format should have succeeded",0,e.status);
}
String cid=getClusterId(config);
assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierBranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test namenode format with -format option when a non empty name directory
 * exists. Enter Y when prompted and the format should succeed.
 * @throws IOException
 * @throws InterruptedException
 */
@Test public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
  // A non-empty name directory forces the format to prompt for confirmation.
  File data=new File(hdfsDir,"file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Feed "Y" to stdin so the interactive prompt is answered affirmatively.
  InputStream origIn=System.in;
  ByteArrayInputStream bins=new ByteArrayInputStream("Y\n".getBytes());
  System.setIn(bins);
  String[] argv={"-format"};
  try {
    NameNode.createNameNode(argv,config);
    fail("createNameNode() did not call System.exit()");
  }
  catch ( ExitException e) {
    assertEquals("Format should have succeeded",0,e.status);
  }
  finally {
    // Always restore the real stdin, even if the fail() above throws;
    // otherwise later tests inherit the exhausted mocked stream.
    System.setIn(origIn);
  }
  String cid=getClusterId(config);
  assertTrue("Didn't get new ClusterId",(cid != null && !cid.equals("")));
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test namenode format with -format option. Format should succeed.
 * @throws IOException
 */
@Test public void testFormat() throws IOException {
  final String[] formatArgs = {"-format"};
  try {
    NameNode.createNameNode(formatArgs, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    // Exit status 0 signals that the format completed successfully.
    assertEquals("Format should have succeeded", 0, e.status);
  }
  // A successful format assigns a fresh, non-empty cluster id.
  final String clusterId = getClusterId(config);
  assertTrue("Didn't get new ClusterId", clusterId != null && !clusterId.isEmpty());
}
BranchVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test namenode format with -format -nonInteractive options when a non empty
 * name directory exists. Format should not succeed.
 * @throws IOException
 */
@Test public void testFormatWithNonInteractive() throws IOException {
  // Populate the name directory so the non-interactive format must abort.
  final File marker = new File(hdfsDir, "file");
  if (!marker.mkdirs()) {
    fail("Failed to create dir " + marker.getPath());
  }
  final String[] formatArgs = {"-format", "-nonInteractive"};
  try {
    NameNode.createNameNode(formatArgs, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have been aborted with exit code 1", 1, e.status);
  }
  // The aborted format must not have written a VERSION file.
  final File versionFile = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", versionFile.exists());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test loading an editlog which has had both its storage fail
 * on alternating rolls. Two edit log directories are created.
 * The first one fails on odd rolls, the second on even. Test
 * that we are able to load the entire editlog regardless.
 */
@Test public void testAlternatingJournalFailure() throws IOException {
File f1=new File(TEST_DIR + "/alternatingjournaltest0");
File f2=new File(TEST_DIR + "/alternatingjournaltest1");
List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
// Perform 10 rolls; AbortSpec(roll, dir) aborts directory <dir> during
// roll <roll>, alternating between the two dirs so every segment is still
// fully present in at least one directory.
NNStorage storage=setupEdits(editUris,10,new AbortSpec(1,0),new AbortSpec(2,1),new AbortSpec(3,0),new AbortSpec(4,1),new AbortSpec(5,0),new AbortSpec(6,1),new AbortSpec(7,0),new AbortSpec(8,1),new AbortSpec(9,0),new AbortSpec(10,1));
long totaltxnread=0;
FSEditLog editlog=getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId=1;
// Ask for all transactions across the 11 segments (10 rolls plus the
// final in-progress segment).
Iterable editStreams=editlog.selectInputStreams(startTxId,TXNS_PER_ROLL * 11);
for ( EditLogInputStream edits : editStreams) {
FSEditLogLoader.EditLogValidation val=FSEditLogLoader.validateEditLog(edits);
long read=(val.getEndTxId() - edits.getFirstTxId()) + 1;
LOG.info("Loading edits " + edits + " read "+ read);
// Streams must be contiguous: each one starts where the previous ended.
assertEquals(startTxId,edits.getFirstTxId());
startTxId+=read;
totaltxnread+=read;
}
editlog.close();
storage.close();
// Despite the alternating failures, every transaction should be readable.
assertEquals(TXNS_PER_ROLL * 11,totaltxnread);
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test loading an editlog with gaps. A single editlog directory
 * is set up. One of the edit log files is deleted. This should
 * fail when selecting the input streams as it will not be able
 * to select enough streams to load up to 4*TXNS_PER_ROLL.
 * There should be 4*TXNS_PER_ROLL transactions as we rolled 3
 * times.
 */
@Test public void testLoadingWithGaps() throws IOException {
File f1=new File(TEST_DIR + "/gaptest0");
List editUris=ImmutableList.of(f1.toURI());
NNStorage storage=setupEdits(editUris,3);
// The second segment (txids TXNS_PER_ROLL+1 .. 2*TXNS_PER_ROLL) is
// deleted below to create the gap.
final long startGapTxId=1 * TXNS_PER_ROLL + 1;
final long endGapTxId=2 * TXNS_PER_ROLL;
// Locate the finalized edits file covering exactly the gap range.
File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) {
return true;
}
return false;
}
}
);
assertEquals(1,files.length);
assertTrue(files[0].delete());
FSEditLog editlog=getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId=1;
try {
// Requesting a range spanning the deleted segment must fail.
editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
fail("Should have thrown exception");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Gap in transactions. Expected to be able to read up until " + "at least txid 40 but unable to find any edit logs containing " + "txid 11",ioe);
}
}
InternalCallVerifierEqualityVerifier
/**
 * Verify that edits logged by two threads can be batched into a single
 * logSync(): after thread B syncs, thread A's earlier edit is already
 * durable, so A's own logSync() is a no-op that is counted by the
 * TransactionsBatchedInSync metric.
 */
@Test public void testSyncBatching() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  // Each single-thread executor models one client thread issuing edits.
  ExecutorService threadA=Executors.newSingleThreadExecutor();
  ExecutorService threadB=Executors.newSingleThreadExecutor();
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    final FSNamesystem namesystem=cluster.getNamesystem();
    FSImage fsimage=namesystem.getFSImage();
    final FSEditLog editLog=fsimage.getEditLog();
    assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",1,editLog.getSyncTxId());
    doLogEdit(threadA,editLog,"thread-a 1");
    // (assertion message grammar fixed: was "should do not affect")
    assertEquals("logging edit without syncing should not affect txid",1,editLog.getSyncTxId());
    doLogEdit(threadB,editLog,"thread-b 1");
    assertEquals("logging edit without syncing should not affect txid",1,editLog.getSyncTxId());
    // Thread B's sync flushes both pending edits (txids 2 and 3) at once.
    doCallLogSync(threadB,editLog);
    assertEquals("logSync from second thread should bump txid up to 3",3,editLog.getSyncTxId());
    // Thread A's sync finds its edit already flushed: batched, no-op.
    doCallLogSync(threadA,editLog);
    assertEquals("logSync from first thread shouldn't change txid",3,editLog.getSyncTxId());
    assertCounter("TransactionsBatchedInSync",1L,getMetrics("NameNodeActivity"));
  }
  finally {
    threadA.shutdown();
    threadB.shutdown();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}
APIUtilityVerifierUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test edit log failover. If a single edit log is missing, other
 * edits logs should be used instead.
 */
@Test public void testEditLogFailOverFromMissing() throws IOException {
  File f1=new File(TEST_DIR + "/failover0");
  File f2=new File(TEST_DIR + "/failover1");
  List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
  NNStorage storage=setupEdits(editUris,3);
  // Delete the second finalized segment from the first directory only; the
  // intact copy in the second directory should be read instead.
  final long startErrorTxId=1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,endErrorTxId));
    }
  });
  assertEquals(1,files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  Collection streams=null;
  try {
    streams=editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    readAllEdits(streams,startTxId);
  }
  catch ( IOException e) {
    LOG.error("edit log failover didn't work",e);
    fail("Edit log failover didn't work");
  }
  finally {
    // streams is still null if selectInputStreams() itself threw; guard so
    // a cleanup NPE cannot mask the original assertion failure.
    if (streams != null) {
      IOUtils.cleanup(null,streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifierUtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test edit log failover from a corrupt edit log
 */
@Test public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1=new File(TEST_DIR + "/failover0");
  File f2=new File(TEST_DIR + "/failover1");
  List editUris=ImmutableList.of(f1.toURI(),f2.toURI());
  NNStorage storage=setupEdits(editUris,3);
  // Corrupt the second finalized segment in the first directory only; the
  // intact copy in the second directory should be read instead.
  final long startErrorTxId=1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId=2 * TXNS_PER_ROLL;
  File[] files=new File(f1,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept( File dir, String name){
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,endErrorTxId));
    }
  });
  assertEquals(1,files.length);
  long fileLen=files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: "+ fileLen);
  // Flip the last 4 bytes of the file; close the handle in a finally block
  // so an I/O failure mid-corruption cannot leak it.
  RandomAccessFile rwf=new RandomAccessFile(files[0],"rw");
  try {
    rwf.seek(fileLen - 4);
    int b=rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
  }
  finally {
    rwf.close();
  }
  FSEditLog editlog=getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId=1;
  Collection streams=null;
  try {
    streams=editlog.selectInputStreams(startTxId,4 * TXNS_PER_ROLL);
    readAllEdits(streams,startTxId);
  }
  catch ( IOException e) {
    LOG.error("edit log failover didn't work",e);
    fail("Edit log failover didn't work");
  }
  finally {
    // streams is still null if selectInputStreams() itself threw; guard so
    // a cleanup NPE cannot mask the original assertion failure.
    if (streams != null) {
      IOUtils.cleanup(null,streams.toArray(new EditLogInputStream[0]));
    }
  }
}
APIUtilityVerifierEqualityVerifier
/**
 * Test case for an empty edit log from a prior version of Hadoop.
 */
@Test public void testPreTxIdEditLogNoEdits() throws Exception {
  // Only a mocked namesystem/directory is needed by testLoad().
  FSNamesystem mockNamesystem = Mockito.mock(FSNamesystem.class);
  mockNamesystem.dir = Mockito.mock(FSDirectory.class);
  // "ffffffed" is just the header word of a pre-txid-era log; no edit
  // records follow it, so zero edits should be loaded.
  byte[] emptyLog = StringUtils.hexStringToByte("ffffffed");
  long editCount = testLoad(emptyLog, mockNamesystem);
  assertEquals(0, editCount);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster miniCluster = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    miniCluster.waitActive();
    final FSNamesystem namesystem = miniCluster.getNamesystem();
    // Replay the canned pre-txid edit log and check its observable effects.
    long editsLoaded = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, editsLoaded);
    // The canned edits create /myfile with replication 3 under supergroup.
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}
InternalCallVerifierEqualityVerifier
/**
 * Test what happens with the following sequence:
 *   Thread A writes edit
 *   Thread B calls logSyncAll
 *           calls close() on stream
 *   Thread A calls logSync
 * This sequence is legal and can occur if enterSafeMode() is closely
 * followed by saveNamespace.
 */
@Test public void testBatchedSyncWithClosedLogs() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  // Each single-thread executor models one client thread issuing edits.
  ExecutorService threadA=Executors.newSingleThreadExecutor();
  ExecutorService threadB=Executors.newSingleThreadExecutor();
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    final FSNamesystem namesystem=cluster.getNamesystem();
    FSImage fsimage=namesystem.getFSImage();
    final FSEditLog editLog=fsimage.getEditLog();
    doLogEdit(threadA,editLog,"thread-a 1");
    // (assertion message grammar fixed: was "should do not affect")
    assertEquals("logging edit without syncing should not affect txid",1,editLog.getSyncTxId());
    // logSyncAll from thread B flushes thread A's pending edit as well.
    doCallLogSyncAll(threadB,editLog);
    assertEquals("logSyncAll should sync thread A's transaction",2,editLog.getSyncTxId());
    editLog.close();
    // Thread A's logSync after close() must be harmless: its edit was
    // already synced by thread B before the stream was closed.
    doCallLogSync(threadA,editLog);
  }
  finally {
    threadA.shutdown();
    threadB.shutdown();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Tests the getEditLogManifest function using mock storage for a number
 * of different situations.
 */
@Test public void testEditLogManifestMocks() throws IOException {
NNStorage storage;
FSEditLog log;
// Case 1: both journals agree; the trailing in-progress segment "[201,]"
// must be excluded from the manifest.
storage=mockStorageWithEdits("[1,100]|[101,200]|[201,]","[1,100]|[101,200]|[201,]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",log.getEditLogManifest(101).toString());
// Case 2: the journals hold different finalized segments; the manifest
// is their union.
storage=mockStorageWithEdits("[1,100]|[101,200]","[1,100]|[201,300]|[301,400]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200], [201,300], [301,400]]",log.getEditLogManifest(1).toString());
// Case 3: gap between txid 100 and 301 in both journals; the manifest
// starts after the gap even when asked from txid 1.
storage=mockStorageWithEdits("[1,100]|[301,400]","[301,400]|[401,500]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[301,400], [401,500]]",log.getEditLogManifest(1).toString());
// Case 4: overlapping segments of different lengths; the longer segment
// of each overlapping pair is chosen.
storage=mockStorageWithEdits("[1,100]|[101,150]","[1,50]|[101,200]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",log.getEditLogManifest(101).toString());
// Case 5: an in-progress segment in one journal overlaps a finalized one
// in the other; the finalized segment is preferred.
storage=mockStorageWithEdits("[1,100]|[101,]","[1,100]|[101,200]");
log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",log.getEditLogManifest(101).toString());
}
APIUtilityVerifierUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Corrupt the trailing 4 bytes of every finalized edits file and verify that
 * the NameNode then refuses to start, failing with a ChecksumException as
 * the root cause.
 */
@Test public void testEditChecksum() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys=cluster.getFileSystem();
  final FSNamesystem namesystem=cluster.getNamesystem();
  FSImage fsimage=namesystem.getFSImage();
  final FSEditLog editLog=fsimage.getEditLog();
  // One simple namespace edit so the log has content to checksum.
  fileSys.mkdirs(new Path("/tmp"));
  // Snapshot the EDITS storage directories before shutting the cluster down.
  Iterator iter=fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
  LinkedList sds=new LinkedList();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();
  for ( StorageDirectory sd : sds) {
    File editFile=NNStorage.getFinalizedEditsFile(sd,1,3);
    assertTrue(editFile.exists());
    long fileLen=editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: "+ fileLen);
    // Flip the last 4 bytes; close the handle in a finally block so a
    // seek/read failure cannot leak it.
    RandomAccessFile rwf=new RandomAccessFile(editFile,"rw");
    try {
      rwf.seek(fileLen - 4);
      int b=rwf.readInt();
      rwf.seek(fileLen - 4);
      rwf.writeInt(b + 1);
    }
    finally {
      rwf.close();
    }
  }
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
    fail("should not be able to start");
  }
  catch ( IOException e) {
    assertNotNull("Cause of exception should be ChecksumException",e.getCause());
    assertEquals("Cause of exception should be ChecksumException",ChecksumException.class,e.getCause().getClass());
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Tests rolling edit logs while transactions are ongoing.
 */
@Test public void testEditLogRolling() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Holds the first error thrown by any background transaction worker.
AtomicReference caughtErr=new AtomicReference();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
// Generate edits concurrently while the loop below repeatedly rolls.
startTransactionWorkers(namesystem,caughtErr);
long previousLogTxId=1;
for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
try {
Thread.sleep(20);
}
catch ( InterruptedException e) {
}
LOG.info("Starting roll " + i + ".");
CheckpointSignature sig=namesystem.rollEditLog();
long nextLog=sig.curSegmentTxId;
String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
// Verify the finalized segment and confirm it ends exactly where the
// new segment begins.
previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
assertEquals(previousLogTxId,nextLog);
// The new in-progress segment file must exist on disk.
File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
}
}
finally {
stopTransactionWorkers();
// Surface any worker failure instead of silently passing.
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
IterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Tests saving fs image while transactions are ongoing.
 */
@Test public void testSaveNamespace() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Holds the first error thrown by any background transaction worker.
AtomicReference caughtErr=new AtomicReference();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Generate edits concurrently while the loop below repeatedly saves.
startTransactionWorkers(namesystem,caughtErr);
for (int i=0; i < NUM_SAVE_IMAGE && caughtErr.get() == null; i++) {
try {
Thread.sleep(20);
}
catch ( InterruptedException e) {
}
LOG.info("Save " + i + ": entering safe mode");
namesystem.enterSafeMode(false);
// Verify the current in-progress segment before saving...
long logStartTxId=fsimage.getStorage().getMostRecentCheckpointTxId() + 1;
verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(logStartTxId),logStartTxId);
LOG.info("Save " + i + ": saving namespace");
namesystem.saveNamespace();
LOG.info("Save " + i + ": leaving safemode");
// ...and the same segment, now finalized, after the save.
long savedImageTxId=fsimage.getStorage().getMostRecentCheckpointTxId();
verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(logStartTxId,savedImageTxId),logStartTxId);
// The checkpoint txid should trail the last written txid by exactly
// one (the opening txn of the freshly started segment).
assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),editLog.getLastWrittenTxId() - 1);
namesystem.leaveSafeMode();
LOG.info("Save " + i + ": complete");
}
}
finally {
stopTransactionWorkers();
// Surface any worker failure instead of silently passing.
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * The logSync() method in FSEditLog is unsynchronized whiel syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
 * This replicates the following manual test proposed by Konstantin:
 *   I start the name-node in debugger.
 *   I do -mkdir and stop the debugger in logSync() just before it does flush.
 *   Then I enter safe mode with another client
 *   I start saveNamepsace and stop the debugger in
 *     FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 *     -> EditLogFileOutputStream.create() ->
 *   after truncating the file but before writing LAYOUT_VERSION into it.
 *   Then I let logSync() run.
 *   Then I terminate the name-node.
 * After that the name-node wont start, since the edits file is broken.
 */
@Test public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Spy on the journal's output stream so flush() can be intercepted below.
JournalAndStream jas=editLog.getJournals().get(0);
EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
// Background thread issuing a single mkdirs; its logSync() will be held
// inside flush() by the stubbed answer below.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
// Release the main thread even on failure so await() cannot hang.
waitToEnterFlush.countDown();
}
}
}
;
// When the edit thread reaches flush(), signal the main thread and sleep,
// keeping logSync() parked in its unsynchronized middle section.
Answer blockingFlush=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
}
;
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Entering safe mode must have waited for the in-flight sync to finish.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// The finalized log holds txids 1-3 and the new in-progress segment
// starts at txid 4 -- i.e. nothing was lost or corrupted by the save.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Most of the FSNamesystem methods have a synchronized section where they
 * update the name system itself and write to the edit log, and then
 * unsynchronized, they call logSync. This test verifies that, if an
 * operation has written to the edit log but not yet synced it,
 * we wait for that sync before entering safe mode.
 */
@Test public void testSaveRightBeforeSync() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
// Spy on the edit log so logSync() can be intercepted; install the spy
// back into the image so all callers use it.
FSEditLog editLog=spy(fsimage.getEditLog());
fsimage.editLog=editLog;
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterSync=new CountDownLatch(1);
// Background thread issuing a single mkdirs; its logSync() call will be
// delayed by the stubbed answer below.
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
// Release the main thread even on failure so await() cannot hang.
waitToEnterSync.countDown();
}
}
}
;
// When the edit thread reaches logSync(), signal the main thread and
// sleep, leaving the edit logged but not yet synced.
Answer blockingSync=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("logSync called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it just before logSync...");
waitToEnterSync.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to logSync. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("logSync complete");
return null;
}
}
;
doAnswer(blockingSync).when(editLog).logSync();
doAnEditThread.start();
LOG.info("Main thread: waiting to just before logSync...");
waitToEnterSync.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync about to be called.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Entering safe mode must have waited for the pending sync to complete.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// The finalized log holds txids 1-3 and the new in-progress segment
// starts at txid 4 -- i.e. the pending edit made it into the saved log.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
IterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Exercises error handling of setINodeXAttrs for batched requests:
// duplicate entries, CREATE of an existing xattr, REPLACE of a missing
// xattr, and finally several valid batched set operations.
@Test(timeout=300000) public void testXAttrMultiAddRemoveErrors() throws Exception {
// Start from an inode with no xattrs.
List existingXAttrs=Lists.newArrayList();
List toAdd=Lists.newArrayList();
toAdd.add(generatedXAttrs.get(0));
toAdd.add(generatedXAttrs.get(1));
toAdd.add(generatedXAttrs.get(2));
toAdd.add(generatedXAttrs.get(0));
// Error case: the same xattr appears twice in one request.
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Specified the same xattr to be set twice");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Cannot specify the same " + "XAttr to be set",e);
}
// Error case: CREATE of an xattr already present on the inode.
toAdd.remove(generatedXAttrs.get(0));
existingXAttrs.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Set XAttr that is already set without REPLACE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("already exists",e);
}
// Error case: REPLACE of xattrs not present on the inode.
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
fail("Set XAttr that does not exist without the CREATE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("does not exist",e);
}
// Valid case: CREATE of the two remaining new xattrs succeeds.
toAdd.remove(generatedXAttrs.get(0));
List newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
assertEquals("Unexpected toAdd size",2,toAdd.size());
for ( XAttr x : toAdd) {
assertTrue("Did not find added XAttr " + x,newXAttrs.contains(x));
}
existingXAttrs=newXAttrs;
// Valid case: REPLACE existing xattrs with new values, then verify.
toAdd=Lists.newArrayList();
for (int i=0; i < 3; i++) {
XAttr xAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a" + i).setValue(new byte[]{(byte)(i * 2)}).build();
toAdd.add(xAttr);
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
assertEquals("Unexpected number of new XAttrs",3,newXAttrs.size());
for (int i=0; i < 3; i++) {
assertArrayEquals("Unexpected XAttr value",new byte[]{(byte)(i * 2)},newXAttrs.get(i).getValue());
}
existingXAttrs=newXAttrs;
// Valid case: CREATE|REPLACE together accepts a mix of new and existing.
toAdd=Lists.newArrayList();
for (int i=0; i < 4; i++) {
toAdd.add(generatedXAttrs.get(i));
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
verifyXAttrsPresent(newXAttrs,4);
}
APIUtilityVerifierIterativeVerifierEqualityVerifier
/**
 * Test setting and removing multiple xattrs via single operations
 */
@Test(timeout=300000) public void testXAttrMultiSetRemove() throws Exception {
List existingXAttrs=Lists.newArrayListWithCapacity(0);
// Fixed seed keeps the add/remove batch sizes reproducible across runs.
final Random rand=new Random(0xFEEDA);
int numExpectedXAttrs=0;
// Phase 1: add all generated xattrs in random-sized batches of 1-5.
while (numExpectedXAttrs < numGeneratedXAttrs) {
LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
final int numToAdd=rand.nextInt(5) + 1;
List toAdd=Lists.newArrayListWithCapacity(numToAdd);
for (int i=0; i < numToAdd; i++) {
// Clamp the final batch so we never add past the generated set.
if (numExpectedXAttrs >= numGeneratedXAttrs) {
break;
}
toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
numExpectedXAttrs++;
}
LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
for (int i=0; i < toAdd.size(); i++) {
LOG.info("Will add XAttr " + toAdd.get(i));
}
List newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
verifyXAttrsPresent(newXAttrs,numExpectedXAttrs);
existingXAttrs=newXAttrs;
}
// Phase 2: remove them again, last-added first, in random-sized batches.
while (numExpectedXAttrs > 0) {
LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
final int numToRemove=rand.nextInt(5) + 1;
List toRemove=Lists.newArrayListWithCapacity(numToRemove);
for (int i=0; i < numToRemove; i++) {
// Clamp the final batch so we never remove past zero.
if (numExpectedXAttrs == 0) {
break;
}
toRemove.add(generatedXAttrs.get(numExpectedXAttrs - 1));
numExpectedXAttrs--;
}
final int expectedNumToRemove=toRemove.size();
LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
List removedXAttrs=Lists.newArrayList();
List newXAttrs=fsdir.filterINodeXAttrs(existingXAttrs,toRemove,removedXAttrs);
assertEquals("Unexpected number of removed XAttrs",expectedNumToRemove,removedXAttrs.size());
verifyXAttrsPresent(newXAttrs,numExpectedXAttrs);
existingXAttrs=newXAttrs;
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Tests the per-inode XAttr limit: adding SYSTEM/RAW-namespace xattrs on top
 * of the existing USER xattrs succeeds, but substituting a user-visible
 * (TRUSTED) xattr pushes past the configured limit and must be rejected.
 */
@Test public void testINodeXAttrsLimit() throws Exception {
  // Start with two USER-namespace xattrs already on the inode.
  List existingXAttrs=Lists.newArrayListWithCapacity(2);
  XAttr xAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31,0x32,0x33}).build();
  XAttr xAttr2=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(new byte[]{0x31,0x31,0x31}).build();
  existingXAttrs.add(xAttr1);
  existingXAttrs.add(xAttr2);
  // SYSTEM and RAW namespace xattrs are accepted even with the USER
  // xattrs present.
  XAttr newSystemXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build();
  XAttr newRawXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build();
  List newXAttrs=Lists.newArrayListWithCapacity(2);
  newXAttrs.add(newSystemXAttr);
  newXAttrs.add(newRawXAttr);
  List xAttrs=fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
  // JUnit convention: expected value first (arguments were reversed in the
  // original, which produced a misleading failure message).
  assertEquals(4,xAttrs.size());
  // Swap in a TRUSTED (user visible) xattr; the set must now be rejected
  // for exceeding the limit, per the exception text asserted below.
  XAttr newXAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.TRUSTED).setName("a4").setValue(new byte[]{0x34,0x34,0x34}).build();
  newXAttrs.set(0,newXAttr1);
  try {
    fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
    fail("Setting user visible xattr on inode should fail if " + "reaching limit.");
  }
  catch ( IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " + "to inode, would exceed limit",e);
  }
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test when there is snapshot taken on root
 */
@Test public void testSnapshotOnRoot() throws Exception {
final Path root=new Path("/");
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root,"s1");
// Restart #1: the snapshot on root is reloaded from the edit log.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
// Save a fresh fsimage so the snapshot is persisted in the image itself.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// Restart #2: the snapshot on root is reloaded from the fsimage.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory();
assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// Root should carry exactly one snapshot diff, belonging to snapshot s1.
List diffList=rootNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
// Root must still be listed as the single snapshottable directory.
assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs());
SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null);
assertEquals(root,sdirs[0].getFullPath());
// Save and restart once more to confirm the reloaded state can itself be
// saved and reloaded without error.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test fsimage loading when 1) there is an empty file loaded from fsimage,
 * and 2) there is later an append operation to be applied from edit log.
 */
@Test(timeout=60000) public void testLoadImageWithEmptyFile() throws Exception {
  // Create a zero-length file and capture it in a saved namespace image.
  final Path emptyFile = new Path(dir, "file");
  hdfs.create(emptyFile).close();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  // Append one byte; this lands only in the edit log, not in the image.
  FSDataOutputStream appendStream = hdfs.append(emptyFile);
  appendStream.write(1);
  appendStream.close();
  // Restart: the image is loaded first, then the append is replayed on top.
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
  FileStatus reloaded = hdfs.getFileStatus(emptyFile);
  assertEquals(1, reloaded.getLen());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that inprogress files are handled correct. Set up a single
 * edits directory. Fail on after the last roll. Then verify that the
 * logs have the expected number of transactions.
 */
@Test public void testInprogressRecovery() throws IOException {
  final File editsDir = new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
  // Five rolls, aborting mid-write in directory 0 during the fifth roll,
  // which leaves a partially written in-progress segment behind.
  NNStorage storage = setupEdits(Collections.singletonList(editsDir.toURI()), 5, new AbortSpec(5, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager journal = new FileJournalManager(conf, sd, storage);
  // All finalized transactions plus those written before the abort
  // should be counted when in-progress segments are included.
  final long expectedTxns = 5 * TXNS_PER_ROLL + TXNS_PER_FAIL;
  assertEquals(expectedTxns, getNumberOfTransactions(journal, 1, true, false));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test public void testExcludeInProgressStreams() throws CorruptionException, IOException {
File f=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
// NOTE(review): the third arg presumably leaves the final segment
// in-progress rather than finalized — confirm against setupEdits.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,false);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// With the in-progress flag false, only finalized txids 1..100 count.
assertEquals(100,getNumberOfTransactions(jm,1,false,false));
EditLogInputStream elis=getJournalInputStream(jm,90,false);
try {
FSEditLogOp lastReadOp=null;
while ((lastReadOp=elis.readOp()) != null) {
// No op from the in-progress segment (beyond txid 100) may appear.
assertTrue(lastReadOp.getTransactionId() <= 100);
}
}
finally {
// Always release the stream, even if an assertion fails.
IOUtils.cleanup(LOG,elis);
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that we receive the correct number of transactions when we count
 * the number of transactions around gaps.
 * Set up a single edits directory, with no failures. Delete the 4th logfile.
 * Test that getNumberOfTransactions returns the correct number of
 * transactions before this gap and after this gap. Also verify that if you
 * try to count on the gap that an exception is thrown.
 */
@Test public void testManyLogsWithGaps() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
// The 4th finalized segment, covering [startGapTxId, endGapTxId], will be
// deleted to create the gap.
final long startGapTxId=3 * TXNS_PER_ROLL + 1;
final long endGapTxId=4 * TXNS_PER_ROLL;
File[] files=new File(f,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
// Return the predicate directly instead of if/return true/return false.
return name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId));
}
}
);
assertEquals(1,files.length);
assertTrue(files[0].delete());
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Counting from txid 1 stops right before the gap.
assertEquals(startGapTxId - 1,getNumberOfTransactions(jm,1,true,true));
// Starting inside the gap yields no transactions.
assertEquals(0,getNumberOfTransactions(jm,startGapTxId,true,true));
// After the gap, all remaining transactions up to 11 * TXNS_PER_ROLL count.
assertEquals(11 * TXNS_PER_ROLL - endGapTxId,getNumberOfTransactions(jm,endGapTxId + 1,true,true));
}
EqualityVerifier
@Test public void testGetRemoteEditLog() throws IOException {
// Mock a storage dir holding two contiguous finalized segments, one
// in-progress segment, and a later, disjoint finalized segment.
StorageDirectory sd=FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS,false,NNStorage.getFinalizedEditsFileName(1,100),NNStorage.getFinalizedEditsFileName(101,200),NNStorage.getInProgressEditsFileName(201),NNStorage.getFinalizedEditsFileName(1001,1100));
FileJournalManager fjm=new FileJournalManager(conf,sd,null);
// Only finalized segments at or after the requested txid are listed; the
// in-progress segment starting at 201 is never returned.
assertEquals("[1,100],[101,200],[1001,1100]",getLogsAsString(fjm,1));
assertEquals("[101,200],[1001,1100]",getLogsAsString(fjm,101));
// A txid in the middle of a segment still includes that whole segment.
assertEquals("[101,200],[1001,1100]",getLogsAsString(fjm,150));
assertEquals("[1001,1100]",getLogsAsString(fjm,201));
assertEquals("Asking for a newer log than exists should return empty list","",getLogsAsString(fjm,9999));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test the normal operation of loading transactions from
 * file journal manager. 3 edits directories are setup without any
 * failures. Test that we read in the expected number of transactions.
 */
@Test public void testNormalOperation() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/normtest0");
File f2=new File(TestEditLog.TEST_DIR + "/normtest1");
File f3=new File(TestEditLog.TEST_DIR + "/normtest2");
// Parameterize the list instead of using the raw type.
List<java.net.URI> editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
NNStorage storage=setupEdits(editUris,5);
long numJournals=0;
for ( StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// 5 rolls plus the final segment => 6 segments' worth of transactions.
assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false));
numJournals++;
}
// All three configured edits directories must have been visited.
assertEquals(3,numJournals);
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Make requests with starting transaction ids which don't match the beginning
 * txid of some log segments.
 * This should succeed.
 */
@Test public void testAskForTransactionsMidfile() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/askfortransactionsmidfile");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// NOTE(review): assumes TXNS_PER_ROLL == 10, i.e. 11 segments of 10 txns
// each — confirm against the setupEdits helper.
final int TOTAL_TXIDS=10 * 11;
// Starting from every txid, including ones mid-segment, the count of
// remaining transactions must be exact.
for (int txid=1; txid <= TOTAL_TXIDS; txid++) {
assertEquals((TOTAL_TXIDS - txid) + 1,getNumberOfTransactions(jm,txid,true,false));
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that we can load an edits directory with a corrupt inprogress file.
 * The corrupt inprogress file should be moved to the side.
 */
@Test public void testManyLogsWithCorruptInprogress() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
File[] files=new File(f,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
// Match only the single in-progress segment left behind by the abort.
return name.startsWith("edits_inprogress");
}
}
);
// JUnit convention: expected value first, actual second.
assertEquals(1,files.length);
corruptAfterStartSegment(files[0]);
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// All finalized transactions, plus the single op still readable before
// the corruption point in the in-progress file.
assertEquals(10 * TXNS_PER_ROLL + 1,getNumberOfTransactions(jm,1,true,false));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that FileJournalManager behaves correctly despite inprogress
 * files in all its edit log directories. Set up 3 directories and fail
 * all on the last roll. Verify that the correct number of transaction
 * are then loaded.
 */
@Test public void testInprogressRecoveryAll() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/failalltest0");
File f2=new File(TestEditLog.TEST_DIR + "/failalltest1");
File f3=new File(TestEditLog.TEST_DIR + "/failalltest2");
// Parameterize the raw List and Iterator types.
List<java.net.URI> editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
// Abort the final (6th) segment in each of the three directories.
NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,0),new AbortSpec(5,1),new AbortSpec(5,2));
Iterator<StorageDirectory> dirs=storage.dirIterator(NameNodeDirType.EDITS);
// Every directory recovers the same count: 5 finalized rolls plus the
// transactions written before the failed roll. Loop instead of repeating
// the identical assertion block three times.
final long expectedTxns=5 * TXNS_PER_ROLL + TXNS_PER_FAIL;
for (int i=0; i < 3; i++) {
FileJournalManager jm=new FileJournalManager(conf,dirs.next(),storage);
assertEquals(expectedTxns,getNumberOfTransactions(jm,1,true,false));
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that we can read from a stream created by FileJournalManager.
 * Create a single edits directory, failing it on the final roll.
 * Then try loading from the point of the 3rd roll. Verify that we read
 * the correct number of transactions from this point.
 */
@Test public void testReadFromStream() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/readfromstream");
// 10 rolls, aborting the 11th segment in directory 0.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// All 10 finalized rolls plus the partial txns from the aborted segment.
long expectedTotalTxnCount=TXNS_PER_ROLL * 10 + TXNS_PER_FAIL;
assertEquals(expectedTotalTxnCount,getNumberOfTransactions(jm,1,true,false));
// Start reading just past the 3rd roll; the remaining count must shrink
// by exactly the number of skipped transactions.
long skippedTxns=(3 * TXNS_PER_ROLL);
long startingTxId=skippedTxns + 1;
long numLoadable=getNumberOfTransactions(jm,startingTxId,true,false);
assertEquals(expectedTotalTxnCount - skippedTxns,numLoadable);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Make sure that we starting reading the correct op when we request a stream
 * with a txid in the middle of an edit log file.
 */
@Test public void testReadFromMiddleOfEditLog() throws CorruptionException, IOException {
File f=new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Request a stream positioned at txid 5, inside the first segment.
EditLogInputStream elis=getJournalInputStream(jm,5,true);
try {
FSEditLogOp op=elis.readOp();
// JUnit convention: expected value first, actual second.
assertEquals("read unexpected op",5,op.getTransactionId());
}
finally {
// Release the stream even if the assertion fails.
IOUtils.cleanup(LOG,elis);
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test a mixture of inprogress files and finalised. Set up 3 edits
 * directories and fail the second on the last roll. Verify that reading
 * the transactions, reads from the finalised directories.
 */
@Test public void testInprogressRecoveryMixed() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/mixtest0");
File f2=new File(TestEditLog.TEST_DIR + "/mixtest1");
File f3=new File(TestEditLog.TEST_DIR + "/mixtest2");
// Parameterize the raw List and Iterator types.
List<java.net.URI> editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
// Only the second directory (index 1) aborts during the final roll.
NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,1));
Iterator<StorageDirectory> dirs=storage.dirIterator(NameNodeDirType.EDITS);
// Dirs 0 and 2 hold all 6 finalized segments; dir 1 stops mid-way
// through the 6th. Drive the three identical checks from a table.
final long[] expectedTxns={6 * TXNS_PER_ROLL,5 * TXNS_PER_ROLL + TXNS_PER_FAIL,6 * TXNS_PER_ROLL};
for ( long expected : expectedTxns) {
FileJournalManager jm=new FileJournalManager(conf,dirs.next(),storage);
assertEquals(expected,getNumberOfTransactions(jm,1,true,false));
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that the concat operation is properly persisted in the
 * edit log, and properly replayed on restart.
 */
@Test public void testConcatInEditLog() throws Exception {
final Path TEST_DIR=new Path("/testConcatInEditLog");
final long FILE_LEN=blockSize;
// Create three one-block source files plus a target of the same size.
Path[] srcFiles=new Path[3];
for (int i=0; i < srcFiles.length; i++) {
Path path=new Path(TEST_DIR,"src-" + i);
DFSTestUtil.createFile(dfs,path,FILE_LEN,REPL_FACTOR,1);
srcFiles[i]=path;
}
Path targetFile=new Path(TEST_DIR,"target");
DFSTestUtil.createFile(dfs,targetFile,FILE_LEN,REPL_FACTOR,1);
dfs.concat(targetFile,srcFiles);
assertTrue(dfs.exists(targetFile));
FileStatus origStatus=dfs.getFileStatus(targetFile);
// Restart the NN so the concat must be replayed from the edit log.
cluster.restartNameNode(true);
// After replay: target still exists, sources are gone, and the target's
// modification time is preserved exactly.
assertTrue(dfs.exists(targetFile));
assertFalse(dfs.exists(srcFiles[0]));
FileStatus statusAfterRestart=dfs.getFileStatus(targetFile);
assertEquals(origStatus.getModificationTime(),statusAfterRestart.getModificationTime());
}
IterativeVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Concatenates 10 files into one.
 * Verifies the final size, deletion of the source files, and the number
 * of blocks in the concatenated file.
 * @throws IOException
 */
@Test public void testConcat() throws IOException, InterruptedException {
final int numFiles=10;
long fileLen=blockSize * 3;
HdfsFileStatus fStatus;
FSDataInputStream stm;
// Use the literal directly; new String("/trg") creates a needless copy.
String trg="/trg";
Path trgPath=new Path(trg);
DFSTestUtil.createFile(dfs,trgPath,fileLen,REPL_FACTOR,1);
fStatus=nn.getFileInfo(trg);
long trgLen=fStatus.getLen();
long trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
// Create the source files and record their lengths, blocks, and contents
// so the concatenated result can be checked byte-for-byte.
Path[] files=new Path[numFiles];
byte[][] bytes=new byte[numFiles][(int)fileLen];
LocatedBlocks[] lblocks=new LocatedBlocks[numFiles];
long[] lens=new long[numFiles];
for (int i=0; i < files.length; i++) {
files[i]=new Path("/file" + i);
Path path=files[i];
System.out.println("Creating file " + path);
DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
fStatus=nn.getFileInfo(path.toUri().getPath());
lens[i]=fStatus.getLen();
assertEquals(trgLen,lens[i]);
lblocks[i]=nn.getBlockLocations(path.toUri().getPath(),0,lens[i]);
stm=dfs.open(path);
stm.readFully(0,bytes[i]);
stm.close();
}
// A user without write permission must not be able to concat.
final UserGroupInformation user1=UserGroupInformation.createUserForTesting("theDoctor",new String[]{"tardis"});
DistributedFileSystem hdfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1,conf);
try {
hdfs.concat(trgPath,files);
fail("Permission exception expected");
}
catch ( IOException ie) {
System.out.println("Got expected exception for permissions:" + ie.getLocalizedMessage());
}
ContentSummary cBefore=dfs.getContentSummary(trgPath.getParent());
dfs.concat(trgPath,files);
ContentSummary cAfter=dfs.getContentSummary(trgPath.getParent());
// Concat removes the source files, so the file count drops by their number.
assertEquals(cBefore.getFileCount(),cAfter.getFileCount() + files.length);
long totalLen=trgLen;
long totalBlocks=trgBlocks;
for (int i=0; i < files.length; i++) {
totalLen+=lens[i];
totalBlocks+=lblocks[i].locatedBlockCount();
}
System.out.println("total len=" + totalLen + "; totalBlocks="+ totalBlocks);
fStatus=nn.getFileInfo(trg);
trgLen=fStatus.getLen();
stm=dfs.open(trgPath);
byte[] byteFileConcat=new byte[(int)trgLen];
stm.readFully(0,byteFileConcat);
stm.close();
trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
// JUnit convention: expected value first, actual second.
assertEquals(totalBlocks,trgBlocks);
assertEquals(totalLen,trgLen);
// All source files must be gone; recreate them for the permission check
// performed implicitly by later operations.
for ( Path p : files) {
fStatus=nn.getFileInfo(p.toUri().getPath());
assertNull("File " + p + " still exists",fStatus);
DFSTestUtil.createFile(dfs,p,fileLen,REPL_FACTOR,1);
}
checkFileContent(byteFileConcat,bytes);
// Concat a small (sub-block) file and re-verify size and block count.
Path smallFile=new Path("/sfile");
int sFileLen=10;
DFSTestUtil.createFile(dfs,smallFile,sFileLen,REPL_FACTOR,1);
dfs.concat(trgPath,new Path[]{smallFile});
fStatus=nn.getFileInfo(trg);
trgLen=fStatus.getLen();
trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
assertEquals(totalBlocks + 1,trgBlocks);
assertEquals(totalLen + sFileLen,trgLen);
}
APIUtilityVerifierEqualityVerifier
/**
 * Test that the maximum allowed preferred block size is stored and
 * returned unchanged by the INodeFile.
 */
@Test public void testPreferredBlockSizeUpperBound(){
replication=3;
preferredBlockSize=BLKSIZE_MAXVALUE;
INodeFile inf=createINodeFile(replication,preferredBlockSize);
// Fix the copy-pasted, meaningless message ("True has to be returned").
assertEquals("Preferred block size should equal the configured maximum",BLKSIZE_MAXVALUE,inf.getPreferredBlockSize());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the under-construction lifecycle of an INodeFile: a new file is
 * complete, toUnderConstruction attaches a feature recording the client,
 * and toCompleteFile clears it again.
 */
@Test public void testFileUnderConstruction(){
replication=3;
final INodeFile file=new INodeFile(INodeId.GRANDFATHER_INODE_ID,null,perm,0L,0L,null,replication,1024L);
// A freshly constructed file is not under construction.
assertFalse(file.isUnderConstruction());
final String clientName="client";
final String clientMachine="machine";
file.toUnderConstruction(clientName,clientMachine);
assertTrue(file.isUnderConstruction());
// The feature must record exactly the client name/machine passed in.
FileUnderConstructionFeature uc=file.getFileUnderConstructionFeature();
assertEquals(clientName,uc.getClientName());
assertEquals(clientMachine,uc.getClientMachine());
// Completing the file clears the under-construction state.
file.toCompleteFile(Time.now());
assertFalse(file.isUnderConstruction());
}
APIUtilityVerifierEqualityVerifier
/**
 * Test for the Replication value. Sets a value and checks if it was set
 * correct.
 */
@Test public void testReplication(){
replication=3;
preferredBlockSize=128 * 1024 * 1024;
INodeFile inf=createINodeFile(replication,preferredBlockSize);
// Fix the copy-pasted, meaningless message ("True has to be returned").
assertEquals("Replication factor should match the configured value",replication,inf.getFileReplication());
}
InternalCallVerifierEqualityVerifier
/**
 * Test that concatBlocks merges the block lists of the appended files
 * into the original file.
 */
@Test public void testConcatBlocks(){
INodeFile origFile=createINodeFiles(1,"origfile")[0];
// JUnit convention: expected value first, actual second.
assertEquals("Number of blocks didn't match",1L,origFile.numBlocks());
INodeFile[] appendFiles=createINodeFiles(4,"appendfile");
origFile.concatBlocks(appendFiles);
// 1 original block + 4 appended files (presumably one block each) = 5.
assertEquals("Number of blocks didn't match",5L,origFile.numBlocks());
}
APIUtilityVerifierEqualityVerifier
/**
 * Test for the PreferredBlockSize value. Sets a value and checks if it was
 * set correct.
 */
@Test public void testPreferredBlockSize(){
replication=3;
preferredBlockSize=128 * 1024 * 1024;
INodeFile inf=createINodeFile(replication,preferredBlockSize);
// Fix the copy-pasted, meaningless message ("True has to be returned").
assertEquals("Preferred block size should match the configured value",preferredBlockSize,inf.getPreferredBlockSize());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test public void testGetFullPathNameAfterSetQuota() throws Exception {
long fileLen=1024;
replication=3;
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNamesystem();
FSDirectory fsdir=fsn.getFSDirectory();
DistributedFileSystem dfs=cluster.getFileSystem();
// Create /dir/file and verify the child resolves its full path.
final Path dir=new Path("/dir");
final Path file=new Path(dir,"file");
DFSTestUtil.createFile(dfs,file,fileLen,replication,0L);
INode fnode=fsdir.getINode(file.toString());
assertEquals(file.toString(),fnode.getFullPathName());
// Setting a quota replaces the directory INode with a quota-enabled one.
dfs.setQuota(dir,Long.MAX_VALUE - 1,replication * fileLen * 10);
INodeDirectory dirNode=getDir(fsdir,dir);
assertEquals(dir.toString(),dirNode.getFullPathName());
assertTrue(dirNode.isWithQuota());
// After a rename the child's parent pointer must follow the new INode
// (this is the HDFS-4243 regression check).
final Path newDir=new Path("/newdir");
final Path newFile=new Path(newDir,"file");
dfs.rename(dir,newDir,Options.Rename.OVERWRITE);
fnode=fsdir.getINode(newFile.toString());
assertEquals(newFile.toString(),fnode.getFullPathName());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
EqualityVerifier
/**
 * Test case where two directories are configured as NAME_AND_EDITS
 * and one of them fails to save storage. Since the edits and image
 * failure states are decoupled, the failure of image saving should
 * not prevent the purging of logs from that dir.
 */
@Test public void testPurgingWithNameEditsDirAfterFailure() throws Exception {
MiniDFSCluster cluster=null;
Configuration conf=new HdfsConfiguration();
// Retain no extra edits so purge behavior is directly observable.
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,0);
File sd0=new File(TEST_ROOT_DIR,"nn0");
File sd1=new File(TEST_ROOT_DIR,"nn1");
File cd0=new File(sd0,"current");
File cd1=new File(sd1,"current");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(sd0,sd1));
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).format(true).build();
NameNode nn=cluster.getNameNode();
doSaveNamespace(nn);
LOG.info("After first save, images 0 and 2 should exist in both dirs");
assertGlobEquals(cd0,"fsimage_\\d*",getImageFileName(0),getImageFileName(2));
assertGlobEquals(cd1,"fsimage_\\d*",getImageFileName(0),getImageFileName(2));
assertGlobEquals(cd0,"edits_.*",getFinalizedEditsFileName(1,2),getInProgressEditsFileName(3));
assertGlobEquals(cd1,"edits_.*",getFinalizedEditsFileName(1,2),getInProgressEditsFileName(3));
doSaveNamespace(nn);
LOG.info("After second save, image 0 should be purged, " + "and image 4 should exist in both.");
assertGlobEquals(cd0,"fsimage_\\d*",getImageFileName(2),getImageFileName(4));
assertGlobEquals(cd1,"fsimage_\\d*",getImageFileName(2),getImageFileName(4));
assertGlobEquals(cd0,"edits_.*",getFinalizedEditsFileName(3,4),getInProgressEditsFileName(5));
assertGlobEquals(cd1,"edits_.*",getFinalizedEditsFileName(3,4),getInProgressEditsFileName(5));
// Simulate a storage failure by making the first dir unwritable.
LOG.info("Failing first storage dir by chmodding it");
assertEquals(0,FileUtil.chmod(cd0.getAbsolutePath(),"000"));
doSaveNamespace(nn);
LOG.info("Restoring accessibility of first storage dir");
assertEquals(0,FileUtil.chmod(cd0.getAbsolutePath(),"755"));
LOG.info("nothing should have been purged in first storage dir");
assertGlobEquals(cd0,"fsimage_\\d*",getImageFileName(2),getImageFileName(4));
assertGlobEquals(cd0,"edits_.*",getFinalizedEditsFileName(3,4),getInProgressEditsFileName(5));
LOG.info("fsimage_2 should be purged in second storage dir");
assertGlobEquals(cd1,"fsimage_\\d*",getImageFileName(4),getImageFileName(6));
assertGlobEquals(cd1,"edits_.*",getFinalizedEditsFileName(5,6),getInProgressEditsFileName(7));
LOG.info("On next save, we should purge logs from the failed dir," + " but not images, since the image directory is in failed state.");
doSaveNamespace(nn);
assertGlobEquals(cd1,"fsimage_\\d*",getImageFileName(6),getImageFileName(8));
assertGlobEquals(cd1,"edits_.*",getFinalizedEditsFileName(7,8),getInProgressEditsFileName(9));
assertGlobEquals(cd0,"fsimage_\\d*",getImageFileName(2),getImageFileName(4));
assertGlobEquals(cd0,"edits_.*",getInProgressEditsFileName(9));
}
finally {
// Restore permissions so the test dir can be cleaned up, then shut down.
FileUtil.chmod(cd0.getAbsolutePath(),"755");
LOG.info("Shutting down...");
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifierEqualityVerifier
/**
 * Tests that only a single space check is performed if two name dirs are
 * supplied which are on the same volume.
 */
@Test public void testChecking2NameDirsOnOneVolume() throws IOException {
Configuration conf=new Configuration();
// Two distinct directories, but both under BASE_DIR => same volume.
File nameDir1=new File(BASE_DIR,"name-dir1");
File nameDir2=new File(BASE_DIR,"name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
// Reserve more space than any volume can have so every checked volume
// reports as low on space.
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,Long.MAX_VALUE);
NameNodeResourceChecker nb=new NameNodeResourceChecker(conf);
assertEquals("Should not check the same volume more than once.",1,nb.getVolumesLowOnSpace().size());
}
InternalCallVerifierEqualityVerifier
/**
 * Tests that only a single space check is performed if extra volumes are
 * configured manually which also coincide with a volume the name dir is on.
 */
@Test public void testCheckingExtraVolumes() throws IOException {
Configuration conf=new Configuration();
File nameDir=new File(BASE_DIR,"name-dir");
nameDir.mkdirs();
// Configure the same path both as the edits dir and as an explicitly
// checked volume; the checker must deduplicate them.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY,nameDir.getAbsolutePath());
// Reserve more space than any volume can have so every checked volume
// reports as low on space.
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,Long.MAX_VALUE);
NameNodeResourceChecker nb=new NameNodeResourceChecker(conf);
assertEquals("Should not check the same volume more than once.",1,nb.getVolumesLowOnSpace().size());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test retry-cache behavior for createSnapshot, renameSnapshot and
 * deleteSnapshot: repeating an operation under the same call id returns
 * the cached result, while a new call id re-executes it (and fails if
 * the operation is no longer valid).
 */
@Test public void testSnapshotMethods() throws Exception {
String dir="/testNamenodeRetryCache/testCreateSnapshot/src";
resetCall();
namesystem.mkdirs(dir,perm,true);
namesystem.allowSnapshot(dir);
// NOTE(review): newCall() presumably simulates a client retry context
// with a fresh call id — confirm against the test's helper methods.
newCall();
String name=namesystem.createSnapshot(dir,"snap1");
// Retries under the same call id return the cached snapshot name.
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
newCall();
// A genuinely new call must fail: the snapshot already exists.
try {
namesystem.createSnapshot(dir,"snap1");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
}
newCall();
// Same pattern for rename: retries succeed via the cache...
namesystem.renameSnapshot(dir,"snap1","snap2");
namesystem.renameSnapshot(dir,"snap1","snap2");
namesystem.renameSnapshot(dir,"snap1","snap2");
newCall();
// ...but a new call fails because snap1 no longer exists.
try {
namesystem.renameSnapshot(dir,"snap1","snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
}
newCall();
// Same pattern for delete.
namesystem.deleteSnapshot(dir,"snap2");
namesystem.deleteSnapshot(dir,"snap2");
namesystem.deleteSnapshot(dir,"snap2");
newCall();
try {
namesystem.deleteSnapshot(dir,"snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After run a set of operations, restart NN and check if the retry cache has
 * been rebuilt based on the editlog.
 */
@Test public void testRetryCacheRebuild() throws Exception {
DFSTestUtil.runOperations(cluster,filesystem,conf,BlockSize,0);
LightWeightCache cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
// runOperations is expected to populate exactly 23 cacheable calls.
assertEquals(23,cacheSet.size());
// Snapshot the current cache entries so they can be compared after the
// restart (Map used as a set keyed by the entry itself).
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
// Restart: the retry cache must be rebuilt purely from the edit log.
cluster.restartNameNode();
cluster.waitActive();
namesystem=cluster.getNamesystem();
assertTrue(namesystem.hasRetryCache());
cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Every rebuilt entry must match one captured before the restart.
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test retry-cache behavior for file creation: repeating startFile under
 * the same call id returns the cached status, while a new call id fails
 * because the file already exists.
 */
@Test public void testCreate() throws Exception {
String src="/testNamenodeRetryCache/testCreate/file";
newCall();
HdfsFileStatus status=namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null);
// Retries under the same call id return the identical cached status.
Assert.assertEquals(status,namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null));
Assert.assertEquals(status,namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null));
newCall();
// A genuinely new call must fail since the file now exists.
try {
namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null);
Assert.fail("testCreate - expected exception is not thrown");
}
catch ( IOException e) {
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test retry-cache behavior for append: repeating appendFile under the
 * same call id returns the cached located block, while a new call id
 * fails because the lease is already held.
 * (Javadoc previously said "Test for rename1", which was wrong.)
 */
@Test public void testAppend() throws Exception {
String src="/testNamenodeRetryCache/testAppend/src";
resetCall();
DFSTestUtil.createFile(filesystem,new Path(src),128,(short)1,0L);
newCall();
LocatedBlock b=namesystem.appendFile(src,"holder","clientMachine");
// Retries under the same call id return the identical cached block.
Assert.assertEquals(b,namesystem.appendFile(src,"holder","clientMachine"));
Assert.assertEquals(b,namesystem.appendFile(src,"holder","clientMachine"));
newCall();
// A genuinely new call must fail.
try {
namesystem.appendFile(src,"holder","clientMachine");
Assert.fail("testAppend - expected exception is not thrown");
}
catch ( Exception e) {
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. In this the above condition is
 * tested by reducing the replication factor
 * The test strategy :
 * Bring up Cluster with 3 DataNodes
 * Create a file of replication factor 3
 * Corrupt one replica of a block of the file
 * Verify that there are still 2 good replicas and 1 corrupt replica
 * (corrupt replica should not be removed since number of good
 * replicas (2) is less than replication factor (3))
 * Set the replication factor to 2
 * Verify that the corrupt replica is removed.
 * (corrupt replica should be removed since number of good
 * replicas (2) is equal to replication factor (2))
 */
@Test public void testWhenDecreasingReplication() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
corruptBlock(cluster,fs,fileName,0,block);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
// 2 good replicas < replication factor 3 => corrupt replica retained.
assertEquals(2,countReplicas(namesystem,block).liveReplicas());
assertEquals(1,countReplicas(namesystem,block).corruptReplicas());
namesystem.setReplication(fileName.toString(),(short)2);
// Give the NN time to process the replication change.
try {
Thread.sleep(3000);
}
catch ( InterruptedException ignored) {
}
// 2 good replicas == new replication factor 2 => corrupt replica purged.
assertEquals(2,countReplicas(namesystem,block).liveReplicas());
assertEquals(0,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * None of the blocks can be removed if all blocks are corrupt.
 * The test strategy :
 * Bring up Cluster with 3 DataNodes
 * Create a file of replication factor 3
 * Corrupt all three replicas
 * Verify that all replicas are corrupt and 3 replicas are present.
 * Set the replication factor to 1
 * Verify that all replicas are corrupt and 3 replicas are present.
 */
@Test public void testWithAllCorruptReplicas() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
// Corrupt every replica on all three datanodes.
corruptBlock(cluster,fs,fileName,0,block);
corruptBlock(cluster,fs,fileName,1,block);
corruptBlock(cluster,fs,fileName,2,block);
// Give the NN time to receive the corruption reports.
try {
Thread.sleep(3000);
}
catch ( InterruptedException ignored) {
}
assertEquals(0,countReplicas(namesystem,block).liveReplicas());
assertEquals(3,countReplicas(namesystem,block).corruptReplicas());
// With zero good replicas, lowering the replication factor must not
// cause any corrupt replica to be removed.
namesystem.setReplication(fileName.toString(),(short)1);
try {
Thread.sleep(3000);
}
catch ( InterruptedException ignored) {
}
assertEquals(0,countReplicas(namesystem,block).liveReplicas());
assertEquals(3,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. In this test, the above
 * condition is achieved by increasing the number of good replicas by
 * replicating on a new Datanode.
 * The test strategy :
 * Bring up Cluster with 3 DataNodes
 * Create a file of replication factor 3
 * Corrupt one replica of a block of the file
 * Verify that there are still 2 good replicas and 1 corrupt replica
 * (corrupt replica should not be removed since number of good replicas
 * (2) is less than replication factor (3))
 * Start a new data node
 * Verify that a new replica is created and the corrupt replica is
 * removed.
 */
@Test public void testByAddingAnExtraDataNode() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Stop the 4th DN so the file initially lands on exactly 3 nodes.
DataNodeProperties dnPropsFourth=cluster.stopDataNode(3);
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
corruptBlock(cluster,fs,fileName,0,block);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
// 2 good replicas < replication factor 3 => corrupt replica retained.
assertEquals(2,countReplicas(namesystem,block).liveReplicas());
assertEquals(1,countReplicas(namesystem,block).corruptReplicas());
// Restarting the 4th DN lets the NN re-replicate to reach 3 good
// replicas, at which point the corrupt one must be purged.
cluster.restartDataNode(dnPropsFourth);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
assertEquals(3,countReplicas(namesystem,block).liveReplicas());
assertEquals(0,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* The corrupt block has to be removed when the number of valid replicas
* matches replication factor for the file. The above condition should hold
* true as long as there is one good replica. This test verifies that.
* The test strategy :
* Bring up Cluster with 2 DataNodes
* Create a file of replication factor 2
* Corrupt one replica of a block of the file
* Verify that there is one good replicas and 1 corrupt replica
* (corrupt replica should not be removed since number of good
* replicas (1) is less than replication factor (2)).
* Set the replication factor to 1
* Verify that the corrupt replica is removed.
* (corrupt replica should be removed since number of good
* replicas (1) is equal to replication factor (1))
*/
@Test(timeout=20000) public void testWithReplicationFactorAsOne() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports / short pending-replication timeout for fast
// convergence within the 20s test timeout.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)2,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
// Corrupt one of the two replicas; with 1 good replica < repl factor 2,
// the corrupt replica is retained.
corruptBlock(cluster,fs,fileName,0,block);
DFSTestUtil.waitReplication(fs,fileName,(short)1);
assertEquals(1,countReplicas(namesystem,block).liveReplicas());
assertEquals(1,countReplicas(namesystem,block).corruptReplicas());
// Drop the replication factor to 1: now good replicas == repl factor,
// so the corrupt replica becomes eligible for removal.
namesystem.setReplication(fileName.toString(),(short)1);
// Poll up to ~10s for the corrupt replica to be removed.
for (int i=0; i < 10; i++) {
try {
Thread.sleep(1000);
}
catch ( InterruptedException ignored) {
}
if (countReplicas(namesystem,block).corruptReplicas() == 0) {
break;
}
}
assertEquals(1,countReplicas(namesystem,block).liveReplicas());
assertEquals(0,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
@Test(timeout=120000) public void testXattrConfiguration() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  // Case 1: a negative max xattr size must be rejected at NN startup.
  try {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, -1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    fail("Expected exception with negative xattr size");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot set a negative value for the maximum size of an xattr", e);
  } finally {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
    if (cluster != null) {
      cluster.shutdown();
      // Reset so a failure in a later section cannot shut this cluster down
      // a second time.
      cluster = null;
    }
  }

  // Case 2: a negative xattrs-per-inode limit must also be rejected.
  try {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, -1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    fail("Expected exception with negative # xattrs per inode");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot set a negative limit on the number of xattrs per inode", e);
  } finally {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }

  // Case 3: a max xattr size of 0 means "unlimited"; starting the NN with
  // format(true) logs the unlimited message twice (format + startup).
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  try {
    int count = appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
    assertEquals("Expected no messages about unlimited xattr size", 0, count);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    count = appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
    assertEquals("Expected unlimited xattr size", 2, count);
  } finally {
    // Detach the appender so it does not keep accumulating log events for
    // every subsequent test run in this JVM (the original leaked it).
    logger.removeAppender(appender);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifierEqualityVerifier
/**
* Verify the following scenario.
* 1. NN restarts.
* 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
* 3. After reregistration completes, DN will send Heartbeat, followed by
* Blockreport.
* 4. NN will mark DatanodeStorageInfo#blockContentsStale to false.
* @throws Exception
*/
@Test(timeout=60000) public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
  // After an NN restart the DN must re-register and block-report; once the
  // report is processed, no DN storage should remain marked content-stale.
  // Verified through the FSNamesystemState MXBean's NumStaleStorages gauge.
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    dfsCluster.waitActive();
    dfsCluster.restartNameNode(true);
    // Force heartbeat processing so registration/reporting completes.
    BlockManagerTestUtil.checkHeartbeat(dfsCluster.getNamesystem().getBlockManager());
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    Integer numStaleStorages = (Integer) (mbs.getAttribute(mxbeanNameFsns, "NumStaleStorages"));
    assertEquals(0, numStaleStorages.intValue());
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
UtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* This test tests hosts include list contains host names. After namenode
* restarts, the still alive datanodes should not have any trouble in getting
* registrant again.
*/
@Test public void testNNRestart() throws IOException, InterruptedException {
  // Datanodes listed by host name in the hosts include file must be able to
  // re-register after a NameNode restart.
  MiniDFSCluster cluster = null;
  FileSystem localFileSys;
  Path hostsFile;
  Path excludeFile;
  final int HEARTBEAT_INTERVAL = 1; // heartbeat interval, in seconds
  localFileSys = FileSystem.getLocal(config);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(localFileSys, excludeFile, null);
  config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  // Resolve 127.0.0.1 to a host name and use that (not the IP) in the
  // include file. (Parameterized types replace the original raw ArrayList.)
  ArrayList<String> list = new ArrayList<String>();
  byte[] b = {127, 0, 0, 1};
  InetAddress inetAddress = InetAddress.getByAddress(b);
  list.add(inetAddress.getHostName());
  writeConfigFile(localFileSys, hostsFile, list);
  int numDatanodes = 1;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).setupHostsFile(true).build();
    cluster.waitActive();
    cluster.restartNameNode();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    assertNotNull(nn);
    assertTrue(cluster.isDataNodeUp());
    // Allow a few heartbeat intervals for the DN to show up as live again.
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be " + numDatanodes, numDatanodes, info.length);
  } catch (IOException e) {
    // fail() always throws AssertionError, so the original's trailing
    // "throw e;" was unreachable and has been dropped.
    fail(StringUtils.stringifyException(e));
  } finally {
    cleanupFile(localFileSys, excludeFile.getParent());
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifierEqualityVerifier
/**
* Tests the upgrade from version 0.22 to Federation version Test with
* clusterid case: -upgrade -clusterid
* Expected to reuse user given clusterid
* @throws Exception
*/
@Test public void testStartupOptUpgradeFrom22WithCID() throws Exception {
// Upgrading from 0.22 (pre-federation, so no existing clusterid) with
// "-upgrade -clusterid cid": the user-supplied id should be adopted.
startOpt.setClusterId("cid");
layoutVersion=Feature.RESERVED_REL22.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
assertEquals("Clusterid should match with the given clusterid","cid",storage.getClusterID());
}
InternalCallVerifierEqualityVerifier
/**
* Tests the upgrade from one version of Federation to another Federation
* version Test with wrong clusterid case: -upgrade -clusterid
* Expected to reuse existing clusterid and ignore user given clusterid
* @throws Exception
*/
@Test public void testStartupOptUpgradeFromFederationWithWrongCID() throws Exception {
// Federation-to-federation upgrade: a user-supplied clusterid that differs
// from the existing one must be ignored; the existing id is kept.
startOpt.setClusterId("wrong-cid");
storage.setClusterID("currentcid");
layoutVersion=Feature.FEDERATION.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
assertEquals("Clusterid should match with the existing one","currentcid",storage.getClusterID());
}
InternalCallVerifierEqualityVerifier
/**
* Tests the upgrade from one version of Federation to another Federation
* version Test without clusterid case: -upgrade
* Expected to reuse existing clusterid
* @throws Exception
*/
@Test public void testStartupOptUpgradeFromFederation() throws Exception {
// Federation-to-federation upgrade with no -clusterid given: the existing
// clusterid must be reused unchanged.
storage.setClusterID("currentcid");
layoutVersion=Feature.FEDERATION.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
assertEquals("Clusterid should match with the existing one","currentcid",storage.getClusterID());
}
InternalCallVerifierEqualityVerifier
/**
* Tests the upgrade from one version of Federation to another Federation
* version Test with correct clusterid case: -upgrade -clusterid
* Expected to reuse existing clusterid and ignore user given clusterid
* @throws Exception
*/
@Test public void testStartupOptUpgradeFromFederationWithCID() throws Exception {
// Federation-to-federation upgrade where the user-supplied clusterid
// matches the existing one: the existing id is (trivially) retained.
startOpt.setClusterId("currentcid");
storage.setClusterID("currentcid");
layoutVersion=Feature.FEDERATION.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
assertEquals("Clusterid should match with the existing one","currentcid",storage.getClusterID());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test for the case where the shared edits dir doesn't have
* all of the recent edit logs.
*/
@Test public void testSharedEditsMissingLogs() throws Exception {
removeStandbyNameDirs();
// Roll so that txids 1-2 are finalized into a segment file we can delete.
CheckpointSignature sig=nn0.getRpcServer().rollEditLog();
assertEquals(3,sig.getCurSegmentTxId());
URI editsUri=cluster.getSharedEditsDir(0,1);
File editsDir=new File(editsUri);
File editsSegment=new File(new File(editsDir,"current"),NNStorage.getFinalizedEditsFileName(1,2));
GenericTestUtils.assertExists(editsSegment);
// Delete the finalized segment to simulate missing logs in the shared dir.
assertTrue(editsSegment.delete());
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(BootstrapStandby.class));
try {
// Bootstrap must fail: the required transactions are unavailable.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE,rc);
}
finally {
logs.stopCapturing();
}
// The failure must be logged at FATAL with the missing txid range.
GenericTestUtils.assertMatches(logs.getOutput(),"FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Test for the base success case. The primary NN
* hasn't made any checkpoints, and we copy the fsimage_0
* file over and start up.
*/
@Test public void testSuccessfulBaseCase() throws Exception {
removeStandbyNameDirs();
// With its name dirs removed, the standby cannot restart on its own...
try {
cluster.restartNameNode(1);
fail("Did not throw");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("storage directory does not exist or is not accessible",ioe);
}
// ...until BootstrapStandby copies the initial fsimage_0 from the active,
// after which a restart succeeds.
int rc=BootstrapStandby.run(new String[]{"-nonInteractive"},cluster.getConfiguration(1));
assertEquals(0,rc);
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of(0));
FSImageTestUtil.assertNNFilesMatch(cluster);
cluster.restartNameNode(1);
}
InternalCallVerifierEqualityVerifier
/**
* Test that, even if the other node is not active, we are able
* to bootstrap standby from it.
*/
@Test(timeout=30000) public void testOtherNodeNotActive() throws Exception {
// Bootstrapping must succeed even when the source NN is in standby state.
cluster.transitionToStandby(0);
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
}
InternalCallVerifierEqualityVerifier
@Test public void testStandbyDirsAlreadyExist() throws Exception {
// Non-interactive bootstrap refuses to overwrite already-formatted dirs...
int rc=BootstrapStandby.run(new String[]{"-nonInteractive"},cluster.getConfiguration(1));
assertEquals(BootstrapStandby.ERR_CODE_ALREADY_FORMATTED,rc);
// ...but "-force" overwrites them and succeeds.
rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Test for downloading a checkpoint made at a later checkpoint
* from the active.
*/
@Test public void testDownloadingLaterCheckpoint() throws Exception {
// Roll twice and save the namespace so the active holds a checkpoint at a
// txid later than the initial image.
nn0.getRpcServer().rollEditLog();
nn0.getRpcServer().rollEditLog();
NameNodeAdapter.enterSafeMode(nn0,false);
NameNodeAdapter.saveNamespace(nn0);
NameNodeAdapter.leaveSafeMode(nn0);
long expectedCheckpointTxId=NameNodeAdapter.getNamesystem(nn0).getFSImage().getMostRecentCheckpointTxId();
assertEquals(6,expectedCheckpointTxId);
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
// The standby must have downloaded the later checkpoint, not fsimage_0.
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of((int)expectedCheckpointTxId));
FSImageTestUtil.assertNNFilesMatch(cluster);
cluster.restartNameNode(1);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* BootstrapStandby when the existing NN is standby
*/
@Test public void testBootstrapStandbyWithStandbyNN() throws Exception {
// Put NN0 into standby, take NN1 down, then bootstrap NN1 from the
// standby NN0 — this must still succeed.
cluster.transitionToStandby(0);
Configuration confNN1=cluster.getConfiguration(1);
cluster.shutdownNameNode(1);
int rc=BootstrapStandby.run(new String[]{"-force"},confNN1);
assertEquals(0,rc);
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of(0));
FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* BootstrapStandby when the existing NN is active
*/
@Test public void testBootstrapStandbyWithActiveNN() throws Exception {
// Make NN0 active, take NN1 down, then bootstrap NN1 from the active NN0.
cluster.transitionToActive(0);
Configuration confNN1=cluster.getConfiguration(1);
cluster.shutdownNameNode(1);
int rc=BootstrapStandby.run(new String[]{"-force"},confNN1);
assertEquals(0,rc);
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of(0));
FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Ensure that an admin cannot finalize an HA upgrade without at least one NN
* being active.
*/
@Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Sanity: no "previous" dirs and consistent cTimes before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Restart NN0 with -upgrade (NN1 kept down); this should create the
// "previous" dirs for NN0 and the shared edits dir only.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally; the cluster keeps running in the upgraded state.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bootstrap NN1 from NN0 and fail over to it to prove both NNs work.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
// With both NNs now standby, finalizing the upgrade must be rejected.
cluster.transitionToStandby(1);
try {
runFinalizeCommand(cluster);
fail("Should not have been able to finalize upgrade with no NN active");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Make sure that an HA NN with NFS-based HA can successfully start and
* upgrade.
*/
@Test public void testNfsUpgrade() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
// Sanity: no "previous" dirs and consistent cTimes before the upgrade.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Restart NN0 with -upgrade while NN1 is down; "previous" dirs appear
// for NN0 and the NFS shared edits dir, not for NN1.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally and continue writing in the upgraded state.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bootstrap NN1 and fail over to it; the FS must remain writable.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Make sure that even if the NN which initiated the upgrade is in the standby
* state that we're allowed to finalize.
*/
@Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Sanity: neither the JNs nor the NNs have "previous" dirs yet.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 initiates the upgrade while NN1 is down; JNs get "previous" dirs.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Bootstrap NN1, make it active (so the upgrade initiator NN0 is
// standby), and finalize from there — this must be allowed.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
runFinalizeCommand(cluster);
// Finalization removes all "previous" dirs, including on the JNs.
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
InternalCallVerifierEqualityVerifier
/**
* Another regression test for HDFS-2742. This tests the following sequence:
* - DN does a block report while file is open. This BR contains
* the block in RBW state.
* - The block report is delayed in reaching the standby.
* - The file is closed.
* - The standby processes the OP_ADD and OP_CLOSE operations before
* the RBW block report arrives.
* - The standby should not mark the block as corrupt.
*/
@Test public void testRBWReportArrivesAfterEdits() throws Exception {
// Latch released once the delayed block report has been fully processed.
final CountDownLatch brFinished=new CountDownLatch(1);
DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG){
@Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
try {
return super.passThrough(invocation);
}
finally {
// Signal only after the call fully completes, not just when it starts.
brFinished.countDown();
}
}
}
;
FSDataOutputStream out=fs.create(TEST_FILE_PATH);
try {
AppendTestUtil.write(out,0,10);
out.hflush();
// Intercept the DN->standby blockReport RPC and hold it: the report
// carries the block in RBW state while the file is still open.
DataNode dn=cluster.getDataNodes().get(0);
DatanodeProtocolClientSideTranslatorPB spy=DataNodeTestUtils.spyOnBposToNN(dn,nn2);
Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.anyObject(),Mockito.anyString(),Mockito.anyObject());
dn.scheduleAllBlockReport(0);
delayer.waitForCall();
}
finally {
IOUtils.closeStream(out);
}
// Fail over first, so the standby applies OP_ADD/OP_CLOSE before the
// delayed RBW report is released below.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
delayer.proceed();
brFinished.await();
// Neither NN may have marked the block corrupt, and the file is readable.
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs,TEST_FILE_PATH);
}
InternalCallVerifierEqualityVerifier
/**
* Test that, when a block is re-opened for append, the related
* datanode messages are correctly queued by the SBN because
* they have future states and genstamps.
*/
@Test public void testQueueingWithAppend() throws Exception {
// Running count of DN messages the standby is expected to queue; each
// event below generates one message per datanode.
int numQueued=0;
int numDN=cluster.getDataNodes().size();
FSDataOutputStream out=fs.create(TEST_FILE_PATH);
try {
AppendTestUtil.write(out,0,10);
out.hflush();
// RBW replicas reported while the file is open.
numQueued+=numDN;
}
finally {
IOUtils.closeStream(out);
// Finalized replicas reported on close.
numQueued+=numDN;
}
cluster.triggerBlockReports();
numQueued+=numDN;
try {
// Re-opening for append bumps the genstamp, producing messages with
// future states/genstamps that the standby must queue.
out=fs.append(TEST_FILE_PATH);
AppendTestUtil.write(out,10,10);
numQueued+=numDN;
}
finally {
IOUtils.closeStream(out);
numQueued+=numDN;
}
cluster.triggerBlockReports();
numQueued+=numDN;
assertEquals(numQueued,cluster.getNameNode(1).getNamesystem().getPendingDataNodeMessageCount());
// After failover the queued messages are applied; nothing may be marked
// corrupt and the full 20 bytes must be readable.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
AppendTestUtil.check(fs,TEST_FILE_PATH,20);
}
InternalCallVerifierEqualityVerifier
/**
* Test case which restarts the standby node in such a way that,
* when it exits safemode, it will want to invalidate a bunch
* of over-replicated block replicas. Ensures that if we failover
* at this point it won't lose data.
*/
@Test public void testNNClearsCommandsOnFailoverAfterStartup() throws Exception {
// Create an over-replicated situation: NN2 is down while replication is
// reduced, so it learns of the change only from edits at startup.
DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)3,1L);
banner("Shutting down NN2");
cluster.shutdownNameNode(1);
banner("Setting replication to 1, rolling edit log.");
nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
nn1.getRpcServer().rollEditLog();
banner("Starting NN2 again.");
cluster.restartNameNode(1);
nn2=cluster.getNameNode(1);
banner("triggering BRs");
cluster.triggerBlockReports();
// Both NNs compute invalidation work; NN1 still believes it is active.
banner("computing invalidation on nn1");
BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
banner("computing invalidation on nn2");
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
banner("Metadata immediately before failover");
doMetasave(nn2);
// Fail over without telling NN1 (it is wedged in safe mode with aborted
// edit logs), simulating a split-brain-avoided failover.
banner("Failing to NN2 but let NN1 continue to think it's active");
NameNodeAdapter.abortEditLogs(nn1);
NameNodeAdapter.enterSafeMode(nn1,false);
cluster.transitionToActive(1);
assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
banner("Metadata immediately after failover");
doMetasave(nn2);
banner("Triggering heartbeats and block reports so that fencing is completed");
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
banner("Metadata after nodes have all block-reported");
doMetasave(nn2);
// Once fenced, nothing may remain postponed, under-replicated or pending,
// and the file must still be readable via NN2.
assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
HATestUtil.waitForNNToIssueDeletions(nn2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
banner("Making sure the file is still readable");
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
}
InternalCallVerifierEqualityVerifier
/**
* Regression test for HDFS-2742. The issue in this bug was:
* - DN does a block report while file is open. This BR contains
* the block in RBW state.
* - Standby queues the RBW state in PendingDatanodeMessages
* - Standby processes edit logs during failover. Before fixing
* this bug, it was mistakenly applying the RBW reported state
* after the block had been completed, causing the block to get
* marked corrupt. Instead, we should now be applying the RBW
* message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
*/
@Test public void testBlockReportsWhileFileBeingWritten() throws Exception {
FSDataOutputStream out=fs.create(TEST_FILE_PATH);
try {
AppendTestUtil.write(out,0,10);
out.hflush();
// Block-report while the file is still open: the standby queues the
// block in RBW state in PendingDatanodeMessages.
cluster.triggerBlockReports();
}
finally {
IOUtils.closeStream(out);
}
// On failover the standby replays OP_ADD then OP_CLOSE; the queued RBW
// state must not be applied after completion (HDFS-2742 regression).
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs,TEST_FILE_PATH);
}
InternalCallVerifierEqualityVerifier
@Test public void testDnFencing() throws Exception {
// 30 small blocks at replication 3, then drop to 1 so the (still-active)
// NN1 queues invalidations for the excess replicas.
DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)3,1L);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,TEST_FILE_PATH);
nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
cluster.triggerHeartbeats();
// Fail over while NN1 still believes it is active (aborted edit logs +
// safe mode keep it from interfering).
banner("Failing to NN2 but let NN1 continue to think it's active");
NameNodeAdapter.abortEditLogs(nn1);
NameNodeAdapter.enterSafeMode(nn1,false);
cluster.transitionToActive(1);
assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
banner("NN2 Metadata immediately after failover");
doMetasave(nn2);
// Until fencing completes, all 30 blocks are postponed as mis-replicated.
assertEquals(30,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
banner("Triggering heartbeats and block reports so that fencing is completed");
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
banner("Metadata after nodes have all block-reported");
doMetasave(nn2);
assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
// NN2 may now safely issue the deletions; no replicas must be lost.
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
banner("Making sure the file is still readable");
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
banner("Waiting for the actual block files to get deleted from DNs.");
waitForTrueReplication(cluster,block,1);
}
InternalCallVerifierEqualityVerifier
/**
* Test case that reduces replication of a file with a lot of blocks
* and then fails over right after those blocks enter the DN invalidation
* queues on the active. Ensures that fencing is correct and no replicas
* are lost.
*/
@Test public void testNNClearsCommandsOnFailoverWithReplChanges() throws Exception {
DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)1,1L);
banner("rolling NN1's edit log, forcing catch-up");
HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
// Raise replication to 2 and wait until all replication work is computed
// and the pending-replication queue drains.
nn1.getRpcServer().setReplication(TEST_FILE,(short)2);
while (BlockManagerTestUtil.getComputedDatanodeWork(nn1.getNamesystem().getBlockManager()) > 0) {
LOG.info("Getting more replication work computed");
}
BlockManager bm1=nn1.getNamesystem().getBlockManager();
while (bm1.getPendingReplicationBlocksCount() > 0) {
BlockManagerTestUtil.updateState(bm1);
cluster.triggerHeartbeats();
Thread.sleep(1000);
}
banner("triggering BRs");
cluster.triggerBlockReports();
// Now drop back to 1 so the new replicas land on the DN invalidation
// queues of the active right before we fail over.
nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
banner("computing invalidation on nn1");
BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
doMetasave(nn1);
banner("computing invalidation on nn2");
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
doMetasave(nn2);
banner("Metadata immediately before failover");
doMetasave(nn2);
// Fail over while NN1 still believes it is active (aborted logs + safe
// mode keep it from interfering).
banner("Failing to NN2 but let NN1 continue to think it's active");
NameNodeAdapter.abortEditLogs(nn1);
NameNodeAdapter.enterSafeMode(nn1,false);
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
cluster.transitionToActive(1);
assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
banner("Metadata immediately after failover");
doMetasave(nn2);
banner("Triggering heartbeats and block reports so that fencing is completed");
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
banner("Metadata after nodes have all block-reported");
doMetasave(nn2);
// After fencing: nothing postponed, under-replicated, or pending, and no
// replica of the file may have been lost.
assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
HATestUtil.waitForNNToIssueDeletions(nn2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
banner("Making sure the file is still readable");
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
* exception if the URI is a logical URI. This bug fails the combination of
* ha + mapred + security.
*/
@Test public void testDFSGetCanonicalServiceName() throws Exception {
  // The canonical service name of an HA logical URI must equal the token
  // service string built from that URI (HDFS-3062 regression).
  URI hAUri = HATestUtil.getLogicalUri(cluster);
  String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri, HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, dfs.getCanonicalServiceName());
  final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
  // Wildcard type parameter instead of the original raw Token type.
  final Token<?> token = getDelegationToken(dfs, renewer);
  assertEquals(haService, token.getService().toString());
  // Renewal and cancellation against the logical URI must both succeed.
  token.renew(dfs.getConf());
  token.cancel(dfs.getConf());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test that marking the shared edits dir as being "required" causes the NN to
* fail if that dir can't be accessed.
*/
@Test public void testFailureOfSharedDir() throws Exception {
Configuration conf=new Configuration();
// Check NN disk resources every 2s so the failure is noticed quickly.
conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000);
MiniDFSCluster cluster=null;
File sharedEditsDir=null;
try {
// checkExitOnShutdown(false): the NN is expected to call terminate()
// when the required shared dir fails, surfacing as ExitException.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/test1")));
// Make the shared edits dir unwritable and give the resource checker
// time (two intervals) to observe it.
URI sharedEditsUri=cluster.getSharedEditsDir(0,1);
sharedEditsDir=new File(sharedEditsUri);
assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true));
Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
// The standby must stay standby and not enter safe mode over this.
NameNode nn1=cluster.getNameNode(1);
assertTrue(nn1.isStandbyState());
assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode());
// The active must abort when it cannot roll the required shared journal.
NameNode nn0=cluster.getNameNode(0);
try {
nn0.getRpcServer().rollEditLog();
fail("Succeeded in rolling edit log despite shared dir being deleted");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee);
}
// Local (non-shared) edits dirs should still only hold the in-progress
// segment — nothing was finalized.
for ( URI editsUri : cluster.getNameEditsDirs(0)) {
if (editsUri.equals(sharedEditsUri)) {
continue;
}
File editsDir=new File(editsUri.getPath());
File curDir=new File(editsDir,"current");
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1));
}
}
finally {
// Restore write permission so the test dir can be cleaned up.
if (sharedEditsDir != null) {
FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
* Multiple shared edits directories is an invalid configuration.
*/
@Test public void testMultipleSharedDirsFails() throws Exception {
  // Configuring more than one shared edits directory is invalid and must be
  // rejected by FSNamesystem.getNamespaceEditsDirs with an IOException.
  final Configuration config = new Configuration();
  final URI firstShared = new URI("file:///shared-A");
  final URI secondShared = new URI("file:///shared-B");
  final URI localDir = new URI("file:///local-A");
  final String sharedList = Joiner.on(",").join(firstShared, secondShared);
  config.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, sharedList);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, localDir.toString());
  try {
    FSNamesystem.getNamespaceEditsDirs(config);
    fail("Allowed multiple shared edits directories");
  } catch (IOException ioe) {
    // The rejection message is part of the contract being tested.
    assertEquals("Multiple shared edits directories are not yet supported", ioe.getMessage());
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Make sure that the shared edits dirs are listed before non-shared dirs
* when the configuration is parsed. This ensures that the shared journals
* are synced before the local ones.
*/
@Test public void testSharedDirsComeFirstInEditsList() throws Exception {
Configuration conf=new Configuration();
URI sharedA=new URI("file:///shared-A");
URI localA=new URI("file:///local-A");
URI localB=new URI("file:///local-B");
URI localC=new URI("file:///local-C");
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,sharedA.toString());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,Joiner.on(",").join(localC,localB,localA));
List dirs=FSNamesystem.getNamespaceEditsDirs(conf);
assertEquals("Shared dirs should come first, then local dirs, in the order " + "they were listed in the configuration.",Joiner.on(",").join(sharedA,localC,localB,localA),Joiner.on(",").join(dirs));
}
InternalCallVerifierEqualityVerifier
/**
* Test to verify the processing of PendingDataNodeMessageQueue in case of
* append. One block will marked as corrupt if the OP_ADD, OP_UPDATE_BLOCKS
* comes in one edit log segment and OP_CLOSE edit comes in next log segment
* which is loaded during failover. Regression test for HDFS-3605.
*/
@Test public void testMultipleAppendsDuringCatchupTailing() throws Exception {
Configuration conf=new Configuration();
conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,"5000");
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,-1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
FileSystem fs=null;
try {
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
Path fileToAppend=new Path("/FileToAppend");
FSDataOutputStream out=fs.create(fileToAppend);
out.writeBytes("/data");
out.hflush();
cluster.getNameNode(0).getRpcServer().rollEditLog();
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
out.close();
for (int i=0; i < 5; i++) {
DFSTestUtil.appendFile(fs,fileToAppend,"data");
}
cluster.triggerBlockReports();
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
int rc=ToolRunner.run(new DFSck(cluster.getConfiguration(1)),new String[]{"/","-files","-blocks"});
assertEquals(0,rc);
assertEquals("CorruptBlocks should be empty.",0,cluster.getNameNode(1).getNamesystem().getCorruptReplicaBlocks());
}
finally {
if (null != cluster) {
cluster.shutdown();
}
if (null != fs) {
fs.close();
}
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Make sure that when we transition to active in safe mode that we don't
 * prematurely consider blocks missing just because not all DNs have reported
 * yet.
 * This is a regression test for HDFS-3921.
 */
@Test public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode() throws IOException {
// 15-block file at replication 3 on the shared test cluster.
DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
// Stop one DN, then restart the NN without waiting for startup to finish.
cluster.stopDataNode(1);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
// While still in startup safe mode, no block may be counted as missing
// merely because the stopped DN has not reported yet.
assertTrue(cluster.getNameNode(0).isInSafeMode());
assertEquals(0,cluster.getNamesystem(0).getMissingBlocksCount());
}
InternalCallVerifierEqualityVerifier
/**
* Regression test for HDFS-2804: standby should not populate replication
* queues when exiting safe mode.
*/
@Test public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
NameNodeAdapter.saveNamespace(nn1);
nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
DFSTestUtil.createFile(fs,new Path("/test2"),15 * BLOCK_SIZE,(short)3,1L);
nn0.getRpcServer().rollEditLog();
cluster.stopDataNode(1);
cluster.shutdownNameNode(1);
cluster.restartNameNode(1,false);
nn1=cluster.getNameNode(1);
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return !nn1.isInSafeMode();
}
}
,100,10000);
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
assertEquals(0L,nn1.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0L,nn1.getNamesystem().getPendingReplicationBlocks());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test for HDFS-2812. Since lease renewals go from the client
* only to the active NN, the SBN will have out-of-date lease
* info when it becomes active. We need to make sure we don't
* accidentally mark the leases as expired when the failover
* proceeds.
*/
@Test(timeout=120000) public void testLeasesRenewedOnTransition() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
FSDataOutputStream stm=null;
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
try {
cluster.waitActive();
cluster.transitionToActive(0);
LOG.info("Starting with NN 0 active");
stm=fs.create(TEST_FILE_PATH);
long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR);
assertTrue(nn0t0 > 0);
long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertEquals("Lease should not yet exist on nn1",-1,nn1t0);
Thread.sleep(5);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0);
Thread.sleep(5);
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* This test also serves to test{@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration,String)} and{@link DFSUtil#getRpcAddressesForNameserviceId(Configuration,String,String)}by virtue of the fact that it wouldn't work properly if the proxies
* returned were not for the correct NNs.
*/
@Test public void testIsAtLeastOneActive() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new HdfsConfiguration()).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
Configuration conf=new HdfsConfiguration();
HATestUtil.setFailoverConfigurations(cluster,conf);
List namenodes=HAUtil.getProxiesForAllNameNodesInNameservice(conf,HATestUtil.getLogicalHostname(cluster));
assertEquals(2,namenodes.size());
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToActive(0);
assertTrue(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToStandby(0);
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToActive(1);
assertTrue(HAUtil.isAtLeastOneActive(namenodes));
cluster.transitionToStandby(1);
assertFalse(HAUtil.isAtLeastOneActive(namenodes));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* 1. Run a set of operations
* 2. Trigger the NN failover
* 3. Check the retry cache on the original standby NN
*/
@Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception {
DFSTestUtil.runOperations(cluster,dfs,conf,BlockSize,0);
FSNamesystem fsn0=cluster.getNamesystem(0);
LightWeightCache cacheSet=(LightWeightCache)fsn0.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
cluster.getNameNode(0).getRpcServer().rollEditLog();
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
FSNamesystem fsn1=cluster.getNamesystem(1);
cacheSet=(LightWeightCache)fsn1.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
EqualityVerifier
/**
* Test for the case when both of the NNs in the cluster are
* in the standby state, and thus are both creating checkpoints
* and uploading them to each other.
* In this circumstance, they should receive the error from the
* other node indicating that the other node already has a
* checkpoint for the given txid, but this should not cause
* an abort, etc.
*/
@Test public void testBothNodesInStandbyState() throws Exception {
doEdits(0,10);
cluster.transitionToStandby(0);
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(12));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(12));
assertEquals(12,nn0.getNamesystem().getFSImage().getMostRecentCheckpointTxId());
assertEquals(12,nn1.getNamesystem().getFSImage().getMostRecentCheckpointTxId());
List dirs=Lists.newArrayList();
dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster,0));
dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster,1));
FSImageTestUtil.assertParallelFilesAreIdentical(dirs,ImmutableSet.of());
}
InternalCallVerifierEqualityVerifier
/**
* Test cancellation of ongoing checkpoints when failover happens
* mid-checkpoint during image upload from standby to active NN.
*/
@Test(timeout=60000) public void testCheckpointCancellationDuringUpload() throws Exception {
cluster.getConfiguration(0).setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false);
cluster.getConfiguration(1).setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false);
cluster.getConfiguration(1).setLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY,100);
cluster.restartNameNode(0);
cluster.restartNameNode(1);
nn0=cluster.getNameNode(0);
nn1=cluster.getNameNode(1);
cluster.transitionToActive(0);
doEdits(0,100);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(104));
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
cluster.shutdown();
cluster=null;
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
ThreadMXBean threadBean=ManagementFactory.getThreadMXBean();
ThreadInfo[] threads=threadBean.getThreadInfo(threadBean.getAllThreadIds(),1);
for ( ThreadInfo thread : threads) {
if (thread.getThreadName().startsWith("TransferFsImageUpload")) {
return false;
}
}
return true;
}
}
,1000,30000);
assertEquals(0,nn0.getFSImage().getMostRecentCheckpointTxId());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Regression test for HDFS-2795:
* - Start an HA cluster with a DN.
* - Write several blocks to the FS with replication 1.
* - Shutdown the DN
* - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
* - Restart the DN.
* In the bug, the standby node would only very slowly notice the blocks returning
* to the cluster.
*/
@Test(timeout=60000) public void testDatanodeRestarts() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
HAUtil.setAllowStandbyReads(conf,true);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,0);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
try {
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
cluster.transitionToActive(0);
DFSTestUtil.createFile(cluster.getFileSystem(0),TEST_FILE_PATH,5 * 1024,(short)1,1L);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
DataNode dn=cluster.getDataNodes().get(0);
String dnName=dn.getDatanodeId().getXferAddr();
DataNodeProperties dnProps=cluster.stopDataNode(0);
BlockManagerTestUtil.noticeDeadDatanode(nn0,dnName);
BlockManagerTestUtil.noticeDeadDatanode(nn1,dnName);
BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
assertEquals(5,nn0.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks());
LocatedBlocks locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1);
assertEquals("Standby should have registered that the block has no replicas",0,locs.get(0).getLocations().length);
cluster.restartDataNode(dnProps);
cluster.waitActive(0);
cluster.waitActive(1);
BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
assertEquals(0,nn0.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks());
locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1);
assertEquals("Standby should have registered that the block has replicas again",1,locs.get(0).getLocations().length);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
* Test the snapshot limit of a single snapshottable directory.
* @throws Exception
*/
@Test(timeout=300000) public void testSnapshotLimit() throws Exception {
final int step=1000;
final String dirStr="/testSnapshotLimit/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
int s=0;
for (; s < SNAPSHOT_LIMIT; s++) {
final String snapshotName="s" + s;
hdfs.createSnapshot(dir,snapshotName);
if (s % step == 0) {
final Path file=new Path(dirStr,"f" + s);
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,SEED);
}
}
try {
hdfs.createSnapshot(dir,"s" + s);
Assert.fail("Expected to fail to create snapshot, but didn't.");
}
catch ( IOException ioe) {
SnapshotTestHelper.LOG.info("The exception is expected.",ioe);
}
for (int f=0; f < SNAPSHOT_LIMIT; f+=step) {
final String file="f" + f;
s=RANDOM.nextInt(step);
for (; s < SNAPSHOT_LIMIT; s+=RANDOM.nextInt(step)) {
final Path p=SnapshotTestHelper.getSnapshotPath(dir,"s" + s,file);
Assert.assertEquals(s > f,hdfs.exists(p));
}
}
}
UtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Verify interaction between snapshots and a namespace quota: once the quota
 * is exhausted, file creation, snapshot creation, and setPermission on an
 * already-snapshotted file all fail with NSQuotaExceededException, and
 * raising the quota makes them succeed again.
 */
@Test(timeout=300000) public void testSnapshotWithQuota() throws Exception {
// Snapshottable dir with a namespace quota of 6 inodes.
final String dirStr="/testSnapshotWithQuota/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
final int NS_QUOTA=6;
hdfs.setQuota(dir,NS_QUOTA,HdfsConstants.QUOTA_DONT_SET);
final Path foo=new Path(dir,"foo");
final Path f1=new Path(foo,"f1");
DFSTestUtil.createFile(hdfs,f1,BLOCKSIZE,REPLICATION,SEED);
{
// Default snapshot names match s<8 digits>-<6 digits>.<3 digits> and the
// snapshot path is <dir>/.snapshot/<name>.
final Path snapshotPath=hdfs.createSnapshot(dir);
final String snapshotName=snapshotPath.getName();
Assert.assertTrue("snapshotName=" + snapshotName,Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",snapshotName));
final Path parent=snapshotPath.getParent();
Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR,parent.getName());
Assert.assertEquals(dir,parent.getParent());
}
final Path f2=new Path(foo,"f2");
DFSTestUtil.createFile(hdfs,f2,BLOCKSIZE,REPLICATION,SEED);
// Quota is now exhausted: creating another file must fail...
try {
final Path f3=new Path(foo,"f3");
DFSTestUtil.createFile(hdfs,f3,BLOCKSIZE,REPLICATION,SEED);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// ...and so must taking another snapshot...
try {
hdfs.createSnapshot(dir);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// ...and setPermission on f1 (which existed at snapshot time). Here the
// NSQuotaExceededException arrives wrapped in a RemoteException.
try {
hdfs.setPermission(f1,new FsPermission((short)0));
Assert.fail();
}
catch ( RemoteException e) {
Assert.assertSame(NSQuotaExceededException.class,e.unwrapRemoteException().getClass());
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// setPermission on f2 succeeds; f2 was created after the snapshot —
// presumably no quota-charged snapshot copy is needed (TODO confirm).
hdfs.setPermission(f2,new FsPermission((short)0));
// Raising the quota makes snapshot and permission changes succeed again.
hdfs.setQuota(dir,NS_QUOTA + 2,HdfsConstants.QUOTA_DONT_SET);
hdfs.createSnapshot(dir,"s1");
hdfs.setPermission(foo,new FsPermission((short)0444));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test {@link Snapshot#ID_COMPARATOR}.
*/
@Test(timeout=300000) public void testIdCmp(){
final PermissionStatus perm=PermissionStatus.createImmutable("user","group",FsPermission.createImmutable((short)0));
final INodeDirectory snapshottable=new INodeDirectory(0,DFSUtil.string2Bytes("foo"),perm,0L);
snapshottable.addSnapshottableFeature();
final Snapshot[] snapshots={new Snapshot(1,"s1",snapshottable),new Snapshot(1,"s1",snapshottable),new Snapshot(2,"s2",snapshottable),new Snapshot(2,"s2",snapshottable)};
Assert.assertEquals(0,Snapshot.ID_COMPARATOR.compare(null,null));
for ( Snapshot s : snapshots) {
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null,s) > 0);
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s,null) < 0);
for ( Snapshot t : snapshots) {
final int expected=s.getRoot().getLocalName().compareTo(t.getRoot().getLocalName());
final int computed=Snapshot.ID_COMPARATOR.compare(s,t);
Assert.assertEquals(expected > 0,computed > 0);
Assert.assertEquals(expected == 0,computed == 0);
Assert.assertEquals(expected < 0,computed < 0);
}
}
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Renames involving a snapshottable directory: the snapshottable root with
 * existing snapshots cannot itself be renamed, while a child renamed out of
 * it becomes a reference (WithName under the snapshot, shared via WithCount
 * with the inode at the rename destination).
 */
@Test(timeout=300000) public void testRenameFromSDir2NonSDir() throws Exception {
final String dirStr="/testRenameWithSnapshot";
final String abcStr=dirStr + "/abc";
final Path abc=new Path(abcStr);
hdfs.mkdirs(abc,new FsPermission((short)0777));
hdfs.allowSnapshot(abc);
final Path foo=new Path(abc,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(abc,"s0");
// Renaming the snapshottable root itself must be rejected.
try {
hdfs.rename(abc,new Path(dirStr,"tmp"));
fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots",e);
}
final String xyzStr=dirStr + "/xyz";
final Path xyz=new Path(xyzStr);
hdfs.mkdirs(xyz,new FsPermission((short)0777));
final Path bar=new Path(xyz,"bar");
// Renaming a child out of the snapshottable dir is allowed; the copy seen
// through s0 becomes a WithName reference...
hdfs.rename(foo,bar);
final INode fooRef=fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc,"s0","foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
// ...sharing a WithCount (count 2: snapshot copy + rename target) with the
// reference at the new location.
final INodeReference.WithCount withCount=(INodeReference.WithCount)fooRef.asReference().getReferredINode();
Assert.assertEquals(2,withCount.getReferenceCount());
final INode barRef=fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount,barRef.asReference().getReferredINode());
// Deleting the rename target drops one reference; the snapshot copy remains.
hdfs.delete(bar,false);
Assert.assertEquals(1,withCount.getReferenceCount());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Make sure we clean the whole subtree under a DstReference node after
* deleting a snapshot.
* see HDFS-5476.
*/
@Test public void testCleanDstReference() throws Exception {
final Path test=new Path("/test");
final Path foo=new Path(test,"foo");
final Path bar=new Path(foo,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
final Path fileInBar=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,fileInBar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(test,"foo2");
hdfs.rename(foo,foo2);
hdfs.createSnapshot(test,"s1");
hdfs.delete(new Path(foo2,"bar"),true);
hdfs.delete(foo2,true);
final Path sfileInBar=SnapshotTestHelper.getSnapshotPath(test,"s1","foo2/bar/file");
assertTrue(hdfs.exists(sfileInBar));
hdfs.deleteSnapshot(test,"s1");
assertFalse(hdfs.exists(sfileInBar));
restartClusterAndCheckImage(true);
final Path barInS0=SnapshotTestHelper.getSnapshotPath(test,"s0","foo/bar");
INodeDirectory barNode=fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0,barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List diffList=barNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertEquals(0,diff.getChildrenDiff().getList(ListType.DELETED).size());
assertEquals(0,diff.getChildrenDiff().getList(ListType.CREATED).size());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir before taking the snapshot.
*/
@Test public void testRenameUndo_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
INode fooNode_s1=fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
* Unit test for HDFS-4842.
*/
@Test public void testRenameDirAndDeleteSnapshot_7() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
SnapshotTestHelper.createSnapshot(hdfs,test,"s1");
hdfs.delete(file,true);
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
hdfs.deleteSnapshot(test,"s1");
final Path file_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2","foo/bar/file");
assertFalse(hdfs.exists(file_s2));
final Path file_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo/bar/file");
assertTrue(hdfs.exists(file_s0));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List dir1DiffList=dir1Node.getDiffs().asList();
assertEquals(1,dir1DiffList.size());
List dList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertTrue(dList.isEmpty());
List cList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,cList.size());
INode cNode=cList.get(0);
INode fooNode=fsdir.getINode4Write(newfoo.toString());
assertSame(cNode,fooNode);
final Path newbar=new Path(newfoo,bar.getName());
INodeDirectory barNode=fsdir.getINode4Write(newbar.toString()).asDirectory();
assertSame(fooNode.asDirectory(),barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
DirectoryDiff diff=barDiffList.get(0);
INodeDirectory testNode=fsdir.getINode4Write(test.toString()).asDirectory();
Snapshot s0=testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(),diff.getSnapshotId());
assertEquals("file",diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
List dir2DiffList=dir2Node.getDiffs().asList();
assertEquals(1,dir2DiffList.size());
dList=dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1,dList.size());
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2",foo.getName());
INodeReference.WithName fooNode_s2=(INodeReference.WithName)fsdir.getINode(foo_s2.toString());
assertSame(dList.get(0),fooNode_s2);
assertSame(fooNode.asReference().getReferredINode(),fooNode_s2.getReferredINode());
restartClusterAndCheckImage(true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test the undo section of rename. Before the rename, we create the renamed
* file/dir after taking the snapshot.
*/
@Test public void testRenameUndo_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
assertFalse(hdfs.exists(foo_s1));
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
* Test rename a dir and a file multiple times across snapshottable
* directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
* Only create snapshots in the beginning (before the rename).
*/
@Test public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar2_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar2_dir2=new Path(sdir2,"bar");
hdfs.rename(bar2_dir1,bar2_dir2);
restartClusterAndCheckImage(true);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar2_dir2,REPL_1);
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar2_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar1_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar1");
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL_1,statusBar1.getReplication());
FileStatus statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL_1,statusBar2.getReplication());
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar2_dir3=new Path(sdir3,"bar");
hdfs.rename(bar2_dir2,bar2_dir3);
restartClusterAndCheckImage(true);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar2_dir3,REPL_2);
final Path bar1_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","foo/bar1");
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir3);
assertEquals(REPL_2,statusBar2.getReplication());
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar2_dir3,bar2_dir2);
restartClusterAndCheckImage(true);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar2_dir2,REPL);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL,statusBar2.getReplication());
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar2_dir2,bar2_dir1);
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(2,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
assertEquals(1,foo.getDiffs().asList().size());
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1,bar1.getDiffs().asList().size());
assertEquals(s1.getId(),bar1.getDiffs().getLastSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar2_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(2,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
assertEquals(1,bar.getDiffs().asList().size());
assertEquals(s1.getId(),bar.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
hdfs.delete(foo_dir1,true);
hdfs.delete(bar2_dir1,true);
restartClusterAndCheckImage(true);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
assertFalse(hdfs.exists(foo_dir1));
assertFalse(hdfs.exists(bar1_dir1));
assertFalse(hdfs.exists(bar2_dir1));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
fooRef=fsdir.getINode(foo_s1.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWithCount.getReferenceCount());
barRef=fsdir.getINode(bar2_s1.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(1,barWithCount.getReferenceCount());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the undo section of the second-time rename.
 * The rename into /dir3 is forced to fail by replacing dir3 with a Mockito
 * spy whose addChild always returns false. The test verifies that after each
 * failed rename the namespace and the snapshot diff lists are rolled back to
 * exactly the state produced by the first (successful) rename.
 */
@Test public void testRenameUndo_3() throws Exception {
// Three snapshottable roots; /dir1/foo/bar is the initial file.
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Swap dir3 for a spy whose addChild always fails so that any rename into
// /dir3 exercises the rename-undo path.
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
final Path foo_dir2=new Path(sdir2,"foo2");
final Path foo_dir3=new Path(sdir3,"foo3");
// First rename succeeds; the second must fail because of the mocked addChild.
hdfs.rename(foo,foo_dir2);
boolean result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
// After the failed rename, foo2 must still be dir2's only child, recorded
// only once, in s2's created list.
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
List dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(1,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
// foo2 arrived in dir2 after s2 was taken, so it must not be visible in s2.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo2");
assertFalse(hdfs.exists(foo_s2));
INode fooNode=fsdir.getINode4Write(foo_dir2.toString());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
assertTrue(fooNode instanceof INodeReference.DstReference);
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// Take s3 and retry the failing rename: the undo must also restore the new
// (empty) diff created for s3.
hdfs.createSnapshot(sdir2,"s3");
result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s3=dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode=fsdir.getINode4Write(foo_dir2.toString());
dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(2,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
assertEquals(s3.getId(),dir2Diffs.get(1).getSnapshotId());
// s2's diff still records foo2's creation; s3's diff must be empty again.
childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
childrenDiff=dir2Diffs.get(1).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
// foo2 is visible in s3 (taken after the first rename) but not in s2.
final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo2");
assertFalse(hdfs.exists(foo_s2));
assertTrue(hdfs.exists(foo_s3));
assertTrue(fooNode instanceof INodeReference.DstReference);
fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(2,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
assertEquals(s3.getId(),fooDiffs.get(1).getSnapshotId());
}
UtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test undo where dst node being overwritten is a reference node
 * (foo3 is itself an INodeReference created by an earlier rename). A
 * Mockito spy makes dir3.addChild fail once, so the overwrite rename
 * /dir1/foo -> /dir3/foo3 must fail and be fully undone, leaving foo3's
 * reference structure intact.
 */
@Test public void testRenameUndo_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(sdir2,"foo2");
hdfs.mkdirs(foo2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo2 into /dir3 so the destination node (foo3) becomes a reference.
final Path foo3=new Path(sdir3,"foo3");
hdfs.rename(foo2,foo3);
INode foo3Node=fsdir.getINode4Write(foo3.toString());
assertTrue(foo3Node.isReference());
// Spy dir3: the first addChild of a non-null child returns false (forcing
// the rename to fail mid-way), subsequent calls fall through to the real
// method so the undo itself can re-insert children.
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)Mockito.isNull(),anyBoolean(),Mockito.anyInt());
Mockito.when(mockDir3.addChild((INode)Mockito.isNotNull(),anyBoolean(),Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
foo3Node.setParent(mockDir3);
try {
hdfs.rename(foo,foo3,Rename.OVERWRITE);
fail("the rename from " + foo + " to "+ foo3+ " should fail");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("rename from " + foo + " to "+ foo3+ " failed.",e);
}
// The undo must restore the exact same foo3 node, its reference count, and
// its parent-reference linkage.
final INode foo3Node_undo=fsdir.getINode4Write(foo3.toString());
assertSame(foo3Node,foo3Node_undo);
INodeReference.WithCount foo3_wc=(WithCount)foo3Node.asReference().getReferredINode();
assertEquals(2,foo3_wc.getReferenceCount());
assertSame(foo3Node,foo3_wc.getParentReference());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * Make sure we only delete the snapshot s under the renamed dir.
 * Files created between the two renames (bar2, bar3) must survive, and the
 * quota usage of both trees must reflect only the remaining state.
 */
@Test public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo into /dir2, add bar2 and bar3, snapshot s3, rename foo back,
// then delete s3 — only s3's state under the renamed dir should go away.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.rename(foo2,foo);
hdfs.deleteSnapshot(sdir2,"s3");
// Namespace quota checks: dir1 holds the live tree, dir2 only itself + s2.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// foo as seen from s1 is a WithName reference shared with the live foo.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
final INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(2,wc.getReferenceCount());
// The referred directory still contains all three children.
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
assertEquals(bar2.getName(),children.get(1).getLocalName());
assertEquals(bar3.getName(),children.get(2).getLocalName());
// Only the s1 diff remains; it records bar2/bar3 as created after s1.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(2,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
// The live foo is a DstReference sharing the same WithCount node.
final INode fooRef2=fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2=(WithCount)fooRef2.asReference().getReferredINode();
assertSame(wc,wc2);
assertSame(fooRef2,wc.getParentReference());
restartClusterAndCheckImage(true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test rename a dir multiple times across snapshottable directories:
 * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Create snapshots after each rename.
 * At every step the replication factor of bar/bar1 is changed, and the test
 * checks that each snapshot preserves the replication value in effect when
 * it was taken. After the dir returns to /dir1 the reference counts and
 * per-snapshot diff lists of foo/bar/bar1 are verified, and finally the live
 * copies are deleted to check that only snapshot-visible state remains.
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
// Setup: /dir1/foo/bar1 (file) and /dir1/bar (file) under three
// snapshottable roots; initial snapshots s1/s2/s3.
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// Move foo and bar into /dir2, drop replication to REPL_1, snapshot s11/s22/s33.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar_dir2=new Path(sdir2,"bar");
hdfs.rename(bar_dir1,bar_dir2);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar_dir2,REPL_1);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s11");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s22");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s33");
// Move into /dir3, change replication to REPL_2, snapshot s111/s222/s333.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar_dir3=new Path(sdir3,"bar");
hdfs.rename(bar_dir2,bar_dir3);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar_dir3,REPL_2);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s222");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s333");
// Each snapshot must show the replication factor current when it was taken.
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar1_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","foo/bar1");
final Path bar1_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","foo/bar1");
final Path bar_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","bar");
final Path bar_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
FileStatus statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir3);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
// Move back to /dir2, restore replication to REPL, snapshot s1111/s2222.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar_dir3,bar_dir2);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar_dir2,REPL);
restartClusterAndCheckImage(true);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2222");
final Path bar1_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo/bar1");
final Path bar_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s2222);
assertEquals(REPL,statusBar1.getReplication());
statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir2);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s2222);
assertEquals(REPL,statusBar.getReplication());
// Final rename back to /dir1; check reference counts and diff ordering.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar_dir2,bar_dir1);
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
INodeDirectory sdir3Node=fsdir.getINode(sdir3.toString()).asDirectory();
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
// 5 references: 4 WithName (one per snapshot that captured foo) + 1 DstReference.
assertEquals(5,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
List fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
Snapshot s2222=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333=sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(),bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(),bar1Diffs.get(0).getSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(5,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
List barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),barDiffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Delete the live copies: earlier snapshots still see the files, but the
// s1111 snapshot paths (taken while files lived in /dir2) never existed.
hdfs.delete(foo_dir1,true);
hdfs.delete(bar_dir1,true);
restartClusterAndCheckImage(true);
final Path bar1_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","foo/bar1");
final Path bar_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertFalse(hdfs.exists(bar1_s1111));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
assertFalse(hdfs.exists(bar_s1111));
// Deleting the live copy drops one reference; diffs are preserved.
final Path foo_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo");
fooRef=fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(4,fooWithCount.getReferenceCount());
foo=fooWithCount.asDirectory();
fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
barRef=fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(4,barWithCount.getReferenceCount());
bar=barWithCount.asFile();
barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 * The namespace quota on /test/dir2 is set low enough that moving foo (plus
 * the rename-generated reference bookkeeping) would exceed it; the rename
 * must fail and the src/dst trees must be restored exactly, with no stray
 * entries left in any snapshot diff.
 */
@Test public void testRenameUndo_5() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path subdir2=new Path(dir2,"subdir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subdir2);
final Path foo=new Path(dir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Namespace quota of 5 is too small for the rename to complete.
hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
final Path foo2=new Path(subdir2,foo.getName());
boolean rename=hdfs.rename(foo,foo2);
assertFalse(rename);
// Source tree must be fully restored.
assertTrue(hdfs.exists(foo));
assertTrue(hdfs.exists(bar));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
// bar must be a plain INodeFile again (no leftover reference wrapper).
INode barNode=fsdir.getINode4Write(bar.toString());
assertTrue(barNode.getClass() == INodeFile.class);
assertSame(fooNode,barNode.getParent());
// dir1's s1 diff must be empty — the failed rename left no created/deleted entries.
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// dir2's quota usage and children must be unchanged by the failed rename.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(3,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString()));
// dir2's s2 diff must also be empty.
diffList=dir2Node.getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 * bar2/bar3 (created after the rename, only visible in s3) must be destroyed
 * with s3, while the pre-rename content (bar) stays visible through s1.
 */
@Test public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo into /dir2, add bar2/bar3, snapshot s3, then delete both the
// renamed dir and s3: everything added after the rename must be destroyed.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.delete(foo2,true);
hdfs.deleteSnapshot(sdir2,"s3");
// dir1 retains foo/bar via s1; dir2 is back to just itself + s2.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// Only the s1 WithName reference remains (the DstReference died with foo2).
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(1,wc.getReferenceCount());
// bar2/bar3 were destroyed; only the original child bar survives.
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(0,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 * The OVERWRITE rename of a file onto /test/dir2/subdir/subfile succeeds
 * here; the test then verifies dir2's resulting quota usage (the overwritten
 * file's block is still charged because snapshot s2 retains it).
 */
@Test public void testRenameExceedQuota() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path sub_dir2=new Path(dir2,"subdir");
final Path subfile_dir2=new Path(sub_dir2,"subfile");
hdfs.mkdirs(dir1);
DFSTestUtil.createFile(hdfs,subfile_dir2,BLOCKSIZE,REPL,SEED);
final Path foo=new Path(dir1,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Tight namespace quota on dir2; the overwrite rename must still fit.
hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
hdfs.rename(foo,subfile_dir2,Rename.OVERWRITE);
INode dir2Node=fsdir.getINode4Write(dir2.toString());
assertTrue(dir2Node.asDirectory().isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(7,counts.get(Quota.NAMESPACE));
// Disk space counts both the moved-in file and the snapshot-retained one.
assertEquals(BLOCKSIZE * REPL * 2,counts.get(Quota.DISKSPACE));
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test the rename undo when removing dst node fails
 * (the quota check rejects recording the snapshot modification mid-rename).
 * After the QuotaExceededException both trees must be restored: the src dir
 * foo still exists, and no created/deleted entries remain in any diff.
 */
@Test public void testRenameUndo_6() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path sub_dir2=new Path(dir2,"subdir");
final Path subsub_dir2=new Path(sub_dir2,"subdir");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subsub_dir2);
final Path foo=new Path(dir1,"foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Quota of 4 makes removing the overwritten dst exceed dir2's namespace.
hdfs.setQuota(dir2,4,Long.MAX_VALUE - 1);
try {
hdfs.rename(foo,subsub_dir2,Rename.OVERWRITE);
fail("Expect QuotaExceedException");
}
catch ( QuotaExceededException e) {
String msg="Failed to record modification for snapshot: " + "The NameSpace quota (directories and files)" + " is exceeded: quota=4 file count=5";
GenericTestUtils.assertExceptionContains(msg,e);
}
// Source side must be fully restored.
assertTrue(hdfs.exists(foo));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
assertSame(dir1Node,fooNode.getParent());
// dir1's s1 diff must be empty after the undo.
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination side: quota usage and child chain unchanged.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(4,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertTrue(subdir2Node.asDirectory().isWithSnapshot());
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(sub_dir2.toString()));
// The inner subdir stays a plain INodeDirectory (never snapshotted).
INode subsubdir2Node=fsdir.getINode4Write(subsub_dir2.toString());
assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
assertSame(subdir2Node,subsubdir2Node.getParent());
diffList=(dir2Node).getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
diffList=subdir2Node.asDirectory().getDiffs().asList();
assertEquals(0,diffList.size());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test rename where the src/dst directories are both snapshottable
 * directories without snapshots. In such case we need to update the
 * snapshottable dir list in SnapshotManager.
 * Also checks that overwriting a snapshottable dir that still HAS snapshots
 * is rejected, and that after a successful overwrite the listing contains
 * only the surviving dir (same inode id as the original src).
 */
@Test(timeout=60000) public void testRenameAndUpdateSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(sdir2,"bar");
hdfs.mkdirs(foo);
hdfs.mkdirs(bar);
// foo is snapshottable but snapshot-free; bar is snapshottable WITH snap1.
hdfs.allowSnapshot(foo);
SnapshotTestHelper.createSnapshot(hdfs,bar,snap1);
assertEquals(2,fsn.getSnapshottableDirListing().length);
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
long fooId=fooNode.getId();
// Overwriting bar must fail while bar still has snapshots.
try {
hdfs.rename(foo,bar,Rename.OVERWRITE);
fail("Expect exception since " + bar + " is snapshottable and already has snapshots");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains(bar.toString() + " is snapshottable and already has snapshots",e);
}
// After removing snap1 the overwrite succeeds and the snapshottable-dir
// listing must contain exactly one entry: foo's inode now living at bar.
hdfs.deleteSnapshot(bar,snap1);
hdfs.rename(foo,bar,Rename.OVERWRITE);
SnapshottableDirectoryStatus[] dirs=fsn.getSnapshottableDirListing();
assertEquals(1,dirs.length);
assertEquals(bar,dirs[0].getFullPath());
assertEquals(fooId,dirs[0].getDirStatus().getFileId());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test rename to an invalid name (xxx/.snapshot)
 * The rename must be rejected because ".snapshot" is a reserved name, and
 * the namespace plus snapshot diffs of /foo must be left untouched. The
 * namenode is then restarted from a saved image to confirm the state also
 * survives persistence.
 */
@Test public void testRenameUndo_7() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,root,snap1);
// ".snapshot" is reserved — renaming bar to foo/.snapshot must fail.
final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR);
try {
hdfs.rename(bar,invalid);
fail("expect exception since invalid name is used for rename");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e);
}
// foo still has its single child and a single, empty diff for snap1.
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(),diff.getSnapshotId());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
// bar is untouched: same node, same parent, single file diff for snap1.
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode,children.get(0));
assertSame(fooNode,barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
FileDiff barDiff=barDiffList.get(0);
assertEquals(s1.getId(),barDiff.getSnapshotId());
// Save the namespace and restart from the image to verify persistence.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build();
cluster.waitActive();
restartClusterAndCheckImage(true);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Rename a single file across snapshottable dirs.
 * The file only appears in snapshots taken while it lived under the given
 * root: visible in dir2's s2, absent from dir1's s3 (taken before the
 * rename). A post-rename replication change must not affect s2's view.
 */
@Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Move the file into /dir1 and change its replication afterwards.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
hdfs.setReplication(newfoo,REPL_1);
// s2 (taken while foo was in dir2) still shows the original replication.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
FileStatus status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// s3 was taken on dir1 before the rename, so foo is not in it.
final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
// The latest snapshot recorded on the file's diff list must be s2.
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
INodeFile sfoo=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(s2.getId(),sfoo.getDiffs().getLastSnapshotId());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After the following steps:
 *
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 *
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000) public void testRenameDirAcrossSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Post-s2 modifications under foo: replication change + delete of bar.
hdfs.setReplication(bar2,REPL_1);
hdfs.delete(bar,true);
hdfs.createSnapshot(sdir1,"s3");
// Move foo (now a dir with snapshot history) from /dir2 into /dir1.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
// bar was deleted after s2, so s2 still shows it.
final Path snapshotBar=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(snapshotBar));
// bar2 moved with foo; deleting it now is a post-rename change, so the
// diff must land in s2 on the source tree.
final Path newBar2=new Path(newfoo,"bar2");
assertTrue(hdfs.exists(newBar2));
hdfs.delete(newBar2,true);
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
FileStatus status=hdfs.getFileStatus(bar2_s2);
assertEquals(REPL,status.getReplication());
// s3 was taken on dir1 before the rename, so foo/bar2 is not in it.
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * After rename, delete the snapshot in src
 */
@Test public void testRenameDirAndDeleteSnapshot_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s3");
// Rename /dir2/foo -> /dir1/foo and make sure the rename survives an
// fsimage save/reload.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
restartClusterAndCheckImage(true);
final Path bar2=new Path(newfoo,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newfoo,true);
// Both bar and bar2 are still reachable through s4 on the rename target.
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
assertTrue(hdfs.exists(bar_s4));
// Delete s4, the only snapshot on the destination side that covers foo.
hdfs.deleteSnapshot(sdir1,"s4");
restartClusterAndCheckImage(true);
// bar predates the rename: visible in the source's s3, not in /dir1's s3.
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertTrue(hdfs.exists(bar_s3));
// bar2 was created after s3, so no s3 snapshot contains it.
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
hdfs.deleteSnapshot(sdir2,"s3");
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
// Inspect the internal reference structure: the WithName node for s2
// should now be the only remaining reference to foo, with a single diff
// recorded for s2.
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
INodeReference fooRef=fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWC.getReferenceCount());
INodeDirectory fooDir=fooWC.getReferredINode().asDirectory();
List diffs=fooDir.getDiffs().asList();
assertEquals(1,diffs.size());
assertEquals(s2.getId(),diffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Deleting s2 removes the last snapshot copy of foo entirely.
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
restartClusterAndCheckImage(true);
// Verify namespace/diskspace quota usage after cleanup, and again after
// the final snapshot is removed.
Quota.Counts q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Run a chain of renames (including one with OVERWRITE) under a snapshotted
 * root and verify the resulting snapshot diff report between s0 and s1.
 */
@Test public void testRenameWithOverWrite() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path file1InFoo=new Path(foo,"file1");
final Path file2InFoo=new Path(foo,"file2");
final Path file3InFoo=new Path(foo,"file3");
DFSTestUtil.createFile(hdfs,file1InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file2InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file3InFoo,1L,REPL,SEED);
final Path bar=new Path(root,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,root,"s0");
// foo/file1 -> bar/file1; bar itself is then renamed to newDir.
final Path fileInBar=new Path(bar,"file1");
hdfs.rename(file1InFoo,fileInBar);
final Path newDir=new Path(root,"newDir");
hdfs.rename(bar,newDir);
// foo/file2 -> newDir/file2.
final Path file2InNewDir=new Path(newDir,"file2");
hdfs.rename(file2InFoo,file2InNewDir);
// foo/file3 overwrites newDir/file1 (the previously renamed file1).
final Path file1InNewDir=new Path(newDir,"file1");
hdfs.rename(file3InFoo,file1InNewDir,Rename.OVERWRITE);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(root,"s0","s1");
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List entries=report.getDiffList();
// The overwritten file1 appears as a DELETE; the surviving renames are
// reported relative to the names recorded in s0.
assertEquals(7,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,foo.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,bar.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.DELETE,"foo/file1",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"bar","newDir"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file2","newDir/file2"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file3","newDir/file1"));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test renaming a file and then delete snapshots.
 */
@Test public void testRenameFileAndDeleteSnapshot() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Rename /dir2/foo -> /dir1/foo, then change its replication twice with
// snapshot s4 taken in between.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
hdfs.setReplication(newfoo,REPL_1);
hdfs.createSnapshot(sdir1,"s4");
hdfs.setReplication(newfoo,REPL_2);
FileStatus status=hdfs.getFileStatus(newfoo);
assertEquals(REPL_2,status.getReplication());
// s4 must pin the replication value current when it was taken.
final Path foo_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo");
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.createSnapshot(sdir1,"s5");
final Path foo_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo");
status=hdfs.getFileStatus(foo_s5);
assertEquals(REPL_2,status.getReplication());
// Delete snapshots newest-first; older copies must stay intact.
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(foo_s4));
// foo moved into /dir1 only after s3, so no s3 path shows it.
Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
// s2 on the rename source still holds foo at its original replication.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// Only the diff recorded for s2 should remain on the file inode.
INodeFile snode=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1,snode.getDiffs().asList().size());
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),snode.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(foo_s2));
// Remove the remaining snapshots, checking the image after each step.
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Rename a directory inside a snapshottable directory and verify that the
 * snapshot diff report records it as a RENAME entry (plus the MODIFY entry
 * on the snapshot root) rather than a delete/create pair.
 */
@Test(timeout=60000) public void testRenameDirectoryInSnapshot() throws Exception {
final String snapName="sub1snap1";
final Path srcDir=new Path(sub1,"sub2");
final Path dstDir=new Path(sub1,"sub3");
final Path srcFile=new Path(srcDir,"sub2file1");
hdfs.mkdirs(sub1);
hdfs.mkdirs(srcDir);
DFSTestUtil.createFile(hdfs,srcFile,BLOCKSIZE,REPL,SEED);
// Snapshot first, then rename the directory in the current tree.
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapName);
hdfs.rename(srcDir,dstDir);
// Diff the snapshot against the current state ("" means current).
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(sub1,snapName,"");
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List entries=report.getDiffList();
assertEquals(2,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,srcDir.getName(),dstDir.getName()));
}
InternalCallVerifierEqualityVerifier
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works.
 */
@Test public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
final Path foo=new Path("/foo");
final Path bar=new Path(foo,"bar");
// Use an explicit charset: the no-arg String.getBytes() depends on the
// platform default encoding.
final byte[] testData="foo bar baz".getBytes(java.nio.charset.StandardCharsets.UTF_8);
// Create an empty (0-length, no blocks) file and snapshot it.
DFSTestUtil.createFile(hdfs,bar,0,REPLICATION,0L);
assertEquals(0,fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s0");
// Append data so the current file gains a block while the snapshot copy
// still records zero length. try-with-resources closes the stream even
// if the write fails.
try (FSDataOutputStream out=hdfs.append(bar)) {
out.write(testData);
}
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(testData.length,blks[0].getNumBytes());
// Delete the file; saving the namespace must cope with the zero-length
// snapshot copy whose later blocks were removed.
hdfs.delete(bar,true);
cluster.getNameNode().getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
cluster.getNameNode().getRpcServer().saveNamespace();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test public void testDeletionWithZeroSizeBlock() throws Exception {
final Path foo=new Path("/foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s0");
// Open for append without writing: bar stays under construction.
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
// Ask the namenode for a second block; nothing is written to it, so it
// remains 0-sized.
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
barNode=fsdir.getINode4Write(bar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// Deleting the current file must reclaim the trailing 0-sized block; the
// s1 copy keeps only the original full block.
hdfs.delete(bar,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1",bar.getName());
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test public void testDeletionWithZeroSizeBlock3() throws Exception {
final Path foo=new Path("/foo");
final Path subDir=new Path(foo,"sub");
final Path bar=new Path(subDir,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
// Open for append without writing so bar is under construction, then
// allocate an extra block that will stay 0-sized.
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
// Rename the under-construction file, then delete its parent directory.
final Path bar2=new Path(subDir,"bar2");
hdfs.rename(bar,bar2);
INodeFile bar2Node=fsdir.getINode4Write(bar2.toString()).asFile();
blks=bar2Node.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
hdfs.delete(subDir,true);
// The snapshot copy (under its pre-rename name) must have dropped the
// 0-sized block and kept only the full first block.
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1","sub/bar");
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Make sure we delete 0-sized block when deleting an under-construction file
 */
@Test public void testDeletionWithZeroSizeBlock2() throws Exception {
final Path foo=new Path("/foo");
final Path subDir=new Path(foo,"sub");
final Path bar=new Path(subDir,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
// Open for append without writing so bar stays under construction.
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
// Allocate a second block that is never written, so it stays 0-sized.
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
barNode=fsdir.getINode4Write(bar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// Delete the whole subdirectory; the snapshot copy of bar must keep only
// the full first block while the 0-sized block is reclaimed.
hdfs.delete(subDir,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1","sub/bar");
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this simplest scenario, the
 * snapshots are taken on the same directory, and we do not need to combine
 * snapshot diffs.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot1() throws Exception {
Path file0=new Path(sub,"file0");
Path file1=new Path(sub,"file1");
DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
String snapshotName="s1";
// Deleting a snapshot on a directory that is not snapshottable must fail.
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: " + sub.toString() + " is not snapshottable yet");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + sub,e);
}
hdfs.allowSnapshot(sub);
// Deleting a snapshot that does not exist must also fail.
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: snapshot " + snapshotName + " does not exist for "+ sub.toString());
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Cannot delete snapshot " + snapshotName + " from path "+ sub.toString()+ ": the snapshot does not exist.",e);
}
// Create, delete, and re-create s1, checking quota usage at each step.
SnapshotTestHelper.createSnapshot(hdfs,sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,3,BLOCKSIZE * REPLICATION * 2);
hdfs.createSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
// Add a file and take a second snapshot that captures it.
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
String snapshotName2="s2";
hdfs.createSnapshot(sub,snapshotName2);
checkQuotaUsageComputation(sub,6,BLOCKSIZE * REPLICATION * 3);
// Deleting the earliest snapshot (s1) must not change what the later
// snapshot (s2) reports for newFile.
Path ss=SnapshotTestHelper.getSnapshotPath(sub,snapshotName2,"newFile");
FileStatus statusBeforeDeletion=hdfs.getFileStatus(ss);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,5,BLOCKSIZE * REPLICATION * 3);
FileStatus statusAfterDeletion=hdfs.getFileStatus(ss);
System.out.println("Before deletion: " + statusBeforeDeletion.toString() + "\n"+ "After deletion: "+ statusAfterDeletion.toString());
assertEquals(statusBeforeDeletion.toString(),statusAfterDeletion.toString());
}
UtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this more complicated
 * scenario, the snapshots are taken across directories.
 *
 * The test covers the following scenarios:
 * 1. delete the first diff in the diff list of a directory
 * 2. delete the first diff in the diff list of a file
 *
 * Also, the recursive cleanTree process should cover both INodeFile and
 * INodeDirectory.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot2() throws Exception {
Path noChangeDir=new Path(sub,"noChangeDir");
Path noChangeFile=new Path(noChangeDir,"noChangeFile");
Path metaChangeFile=new Path(noChangeDir,"metaChangeFile");
Path metaChangeDir=new Path(noChangeDir,"metaChangeDir");
Path toDeleteFile=new Path(metaChangeDir,"toDeleteFile");
DFSTestUtil.createFile(hdfs,noChangeFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,metaChangeFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,toDeleteFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile toDeleteFileNode=TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(),1,fsdir,blockmanager);
BlockInfo[] blocks=toDeleteFileNode.getBlocks();
// Take s0, then delete a file and change metadata so s0 records diffs.
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
checkQuotaUsageComputation(dir,8,3 * BLOCKSIZE * REPLICATION);
hdfs.delete(toDeleteFile,true);
checkQuotaUsageComputation(dir,10,3 * BLOCKSIZE * REPLICATION);
hdfs.setReplication(metaChangeFile,REPLICATION_1);
hdfs.setOwner(metaChangeDir,"unknown","unknown");
checkQuotaUsageComputation(dir,11,3 * BLOCKSIZE * REPLICATION);
hdfs.createSnapshot(dir,"s1");
checkQuotaUsageComputation(dir,12,3 * BLOCKSIZE * REPLICATION);
// Delete the earliest snapshot: the file removed after s0 loses its last
// reference, so its blocks must be reclaimed for real.
hdfs.deleteSnapshot(dir,"s0");
checkQuotaUsageComputation(dir,7,2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// s0 is gone from the directory; only s1's diff remains.
final INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory();
Snapshot snapshot0=dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1=dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
DirectoryDiffList diffList=dirNode.getDiffs();
assertEquals(1,diffList.asList().size());
assertEquals(snapshot1.getId(),diffList.getLast().getSnapshotId());
diffList=fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
assertEquals(0,diffList.asList().size());
// Unchanged inodes should be plain INodeFile/INodeDirectory again.
final INodeDirectory noChangeDirNode=(INodeDirectory)fsdir.getINode(noChangeDir.toString());
assertEquals(INodeDirectory.class,noChangeDirNode.getClass());
final INodeFile noChangeFileNode=(INodeFile)fsdir.getINode(noChangeFile.toString());
assertEquals(INodeFile.class,noChangeFileNode.getClass());
TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(),1,fsdir,blockmanager);
// Metadata changes made after s0 remain in effect in the current tree.
FileStatus status=hdfs.getFileStatus(metaChangeDir);
assertEquals("unknown",status.getOwner());
assertEquals("unknown",status.getGroup());
status=hdfs.getFileStatus(metaChangeFile);
assertEquals(REPLICATION_1,status.getReplication());
TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(),1,fsdir,blockmanager);
// The deleted file is gone both from the current tree and from s0's path.
try {
status=hdfs.getFileStatus(toDeleteFile);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(),e);
}
final Path toDeleteFileInSnapshot=SnapshotTestHelper.getSnapshotPath(dir,"s0",toDeleteFile.toString().substring(dir.toString().length()));
try {
status=hdfs.getFileStatus(toDeleteFileInSnapshot);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(),e);
}
}
InternalCallVerifierEqualityVerifier
/**
 * Test applying editlog of operation which deletes a snapshottable directory
 * without snapshots. The snapshottable dir list in snapshot manager should be
 * updated.
 */
@Test(timeout=300000) public void testApplyEditLogForDeletion() throws Exception {
final Path parent=new Path("/foo");
final Path snapDir1=new Path(parent,"bar1");
final Path snapDir2=new Path(parent,"bar2");
hdfs.mkdirs(snapDir1);
hdfs.mkdirs(snapDir2);
// Register both children as snapshottable; no snapshots are ever taken.
hdfs.allowSnapshot(snapDir1);
hdfs.allowSnapshot(snapDir2);
assertEquals(2,cluster.getNamesystem().getSnapshotManager().getNumSnapshottableDirs());
assertEquals(2,cluster.getNamesystem().getSnapshotManager().getSnapshottableDirs().length);
// Deleting the parent removes both snapshottable dirs; replaying the
// edit log on restart must drop them from the snapshot manager too.
hdfs.delete(parent,true);
cluster.restartNameNode(0);
assertEquals(0,cluster.getNamesystem().getSnapshotManager().getNumSnapshottableDirs());
assertEquals(0,cluster.getNamesystem().getSnapshotManager().getSnapshottableDirs().length);
// Also verify the state survives an fsimage (checkpoint) round trip.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(0);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* Test deleting a directory which is a descendant of a snapshottable
* directory. In the test we need to cover the following cases:
*
* 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
* 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
* on ancestor(s).
* 3. Delete current INodeFileWithSnapshot.
* 4. Delete current INodeDirectoryWithSnapshot.
 */
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Test replication number calculation for a file with snapshots.
 */
@Test(timeout=60000) public void testReplicationWithSnapshot() throws Exception {
short fileRep=1;
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,fileRep,seed);
// Parameterized map (was a raw type): snapshot path -> replication value
// recorded when that snapshot was taken.
Map<Path,Short> snapshotRepMap=new HashMap<Path,Short>();
while (fileRep < NUMDATANODE) {
Path snapshotRoot=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + fileRep);
Path snapshot=new Path(snapshotRoot,file1.getName());
// The snapshot copy records the replication at snapshot time.
assertEquals(fileRep,getINodeFile(snapshot).getFileReplication());
snapshotRepMap.put(snapshot,fileRep);
// Bump the current replication, then re-check current and all snapshots.
hdfs.setReplication(file1,++fileRep);
checkFileReplication(file1,fileRep,fileRep);
checkSnapshotFileReplication(file1,snapshotRepMap,fileRep);
}
// Lower the replication back; block replication stays at the maximum
// value ever recorded (NUMDATANODE - 1).
hdfs.setReplication(file1,REPLICATION);
checkFileReplication(file1,REPLICATION,(short)(NUMDATANODE - 1));
checkSnapshotFileReplication(file1,snapshotRepMap,(short)(NUMDATANODE - 1));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test replication for a file with snapshots, also including the scenario
 * where the original file is deleted
 */
@Test(timeout=60000) public void testReplicationAfterDeletion() throws Exception {
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
// Parameterized map (was a raw type): snapshot copy of file1 -> expected
// replication recorded at snapshot time.
Map<Path,Short> snapshotRepMap=new HashMap<Path,Short>();
for (int i=1; i <= 3; i++) {
Path root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + i);
Path ssFile=new Path(root,file1.getName());
snapshotRepMap.put(ssFile,REPLICATION);
}
checkFileReplication(file1,REPLICATION,REPLICATION);
checkSnapshotFileReplication(file1,snapshotRepMap,REPLICATION);
// Delete the current file; snapshot copies must keep their replication.
hdfs.delete(file1,true);
// Iterate entries instead of keySet+get to avoid a second map lookup.
for ( Map.Entry<Path,Short> entry : snapshotRepMap.entrySet()) {
final INodeFile ssInode=getINodeFile(entry.getKey());
assertEquals(REPLICATION,ssInode.getBlockReplication());
assertEquals(entry.getValue().shortValue(),ssInode.getFileReplication());
}
}
InternalCallVerifierEqualityVerifier
/**
 * 1) Save xattrs, then create snapshot. Assert that inode of original and
 * snapshot have same xattrs. 2) Change the original xattrs, assert snapshot
 * still has old xattrs.
 */
@Test public void testXAttrForSnapshotRootAfterChange() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
hdfs.setXAttr(path,name1,value1);
hdfs.setXAttr(path,name2,value2);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Both the current dir and the snapshot expose the two xattrs.
// (JUnit assertEquals takes the expected value first; the original had
// the arguments reversed, which garbles failure messages.)
Map<String,byte[]> xattrs=hdfs.getXAttrs(path);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
xattrs=hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
// Change one xattr in the current tree; the snapshot must keep the old
// value, across both edit-log and fsimage restarts.
hdfs.setXAttr(path,name1,newValue1);
doSnapshotRootChangeAssertions(path,snapshotPath);
restart(false);
doSnapshotRootChangeAssertions(path,snapshotPath);
restart(true);
doSnapshotRootChangeAssertions(path,snapshotPath);
}
InternalCallVerifierEqualityVerifier
/**
 * Tests modifying xattrs on a directory that has been snapshotted
 */
@Test(timeout=120000) public void testModifyReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Set xattrs only AFTER the snapshot was taken.
hdfs.setXAttr(path,name1,value1);
hdfs.setXAttr(path,name2,value2);
// (assertEquals takes the expected value first; the original had the
// arguments reversed.)
Map<String,byte[]> xattrs=hdfs.getXAttrs(path);
assertEquals(2,xattrs.size());
assertArrayEquals(value1,xattrs.get(name1));
assertArrayEquals(value2,xattrs.get(name2));
// The snapshot predates the xattrs, so it must show none.
xattrs=hdfs.getXAttrs(snapshotPath);
assertEquals(0,xattrs.size());
// REPLACE must operate on the current state, not the snapshot state.
hdfs.setXAttr(path,name1,value2,EnumSet.of(XAttrSetFlag.REPLACE));
xattrs=hdfs.getXAttrs(path);
assertEquals(2,xattrs.size());
assertArrayEquals(value2,xattrs.get(name1));
assertArrayEquals(value2,xattrs.get(name2));
hdfs.setXAttr(path,name2,value1,EnumSet.of(XAttrSetFlag.REPLACE));
xattrs=hdfs.getXAttrs(path);
assertEquals(2,xattrs.size());
assertArrayEquals(value2,xattrs.get(name1));
assertArrayEquals(value1,xattrs.get(name2));
xattrs=hdfs.getXAttrs(snapshotPath);
assertEquals(0,xattrs.size());
// Removal also operates on the current state only.
hdfs.removeXAttr(path,name1);
hdfs.removeXAttr(path,name2);
xattrs=hdfs.getXAttrs(path);
assertEquals(0,xattrs.size());
}
InternalCallVerifierEqualityVerifier
/**
 * 1) Save xattrs, then create snapshot. Assert that inode of original and
 * snapshot have same xattrs. 2) Remove some original xattrs, assert snapshot
 * still has old xattrs.
 */
@Test public void testXAttrForSnapshotRootAfterRemove() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
hdfs.setXAttr(path,name1,value1);
hdfs.setXAttr(path,name2,value2);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Both the current dir and the snapshot expose the two xattrs.
// (assertEquals takes the expected value first; the original had the
// arguments reversed.)
Map<String,byte[]> xattrs=hdfs.getXAttrs(path);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
xattrs=hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
// Remove both xattrs from the current tree; the snapshot must keep them,
// across both edit-log and fsimage restarts.
hdfs.removeXAttr(path,name1);
hdfs.removeXAttr(path,name2);
doSnapshotRootRemovalAssertions(path,snapshotPath);
restart(false);
doSnapshotRootRemovalAssertions(path,snapshotPath);
restart(true);
doSnapshotRootRemovalAssertions(path,snapshotPath);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that users can copy a snapshot while preserving its xattrs.
 */
@Test(timeout=120000) public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
hdfs.setXAttr(path,name1,value1);
hdfs.setXAttr(path,name2,value2);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
Path snapshotCopy=new Path(path.toString() + "-copy");
// "cp -px" preserves permissions and xattrs on the copy.
String[] argv=new String[]{"-cp","-px",snapshotPath.toUri().toString(),snapshotCopy.toUri().toString()};
int ret=ToolRunner.run(new FsShell(conf),argv);
assertEquals("cp -px is not working on a snapshot",SUCCESS,ret);
// Parameterized map (was a raw type): both xattrs must survive the copy.
Map<String,byte[]> xattrs=hdfs.getXAttrs(snapshotCopy);
assertArrayEquals(value1,xattrs.get(name1));
assertArrayEquals(value2,xattrs.get(name2));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifierPublicFieldVerifier
/**
 * Verify NamenodeWebHdfsMethods.chooseDatanode locality: for CREATE the
 * chosen datanode shares the client's IP, and for GETFILECHECKSUM / OPEN /
 * APPEND the datanode holding the single replica is chosen.
 */
@Test public void testDataLocality() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final String[] racks={RACK0,RACK0,RACK1,RACK1,RACK2,RACK2};
final int nDataNodes=racks.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks="+ Arrays.asList(racks));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(nDataNodes).racks(racks).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final NameNode namenode=cluster.getNameNode();
final DatanodeManager dm=namenode.getNamesystem().getBlockManager().getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize=DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f="/foo";
{
// NOTE(review): this only holds because every MiniDFSCluster datanode
// registers with the same local IP, so CREATE's choice matches each
// datanode's IP in turn — confirm if the cluster setup changes.
for (int i=0; i < nDataNodes; i++) {
final DataNode dn=cluster.getDataNodes().get(i);
final String ipAddr=dm.getDatanode(dn.getDatanodeId()).getIpAddr();
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PutOpParam.Op.CREATE,-1L,blocksize,null);
Assert.assertEquals(ipAddr,chosen.getIpAddr());
}
}
// Write a single-replica file; read-style ops must pick its datanode.
final Path p=new Path(f);
final FSDataOutputStream out=dfs.create(p,(short)1);
out.write(1);
out.close();
final LocatedBlocks locatedblocks=NameNodeAdapter.getBlockLocations(namenode,f,0,1);
// Restore the generic element type (the raw List required unchecked use).
final List<LocatedBlock> lb=locatedblocks.getLocatedBlocks();
Assert.assertEquals(1,lb.size());
final DatanodeInfo[] locations=lb.get(0).getLocations();
Assert.assertEquals(1,locations.length);
final DatanodeInfo expected=locations[0];
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.GETFILECHECKSUM,-1L,blocksize,null);
Assert.assertEquals(expected,chosen);
}
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.OPEN,0,blocksize,null);
Assert.assertEquals(expected,chosen);
}
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PostOpParam.Op.APPEND,-1L,blocksize,null);
Assert.assertEquals(expected,chosen);
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifierPublicFieldVerifier
/**
 * Verify that datanodes listed in the exclude parameter are never chosen by
 * NamenodeWebHdfsMethods.chooseDatanode for GETFILECHECKSUM, OPEN or APPEND.
 */
@Test public void testExcludeDataNodes() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final String[] racks={RACK0,RACK0,RACK1,RACK1,RACK2,RACK2};
final String[] hosts={"DataNode1","DataNode2","DataNode3","DataNode4","DataNode5","DataNode6"};
final int nDataNodes=hosts.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks="+ Arrays.asList(racks)+ ", hosts="+ Arrays.asList(hosts));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final NameNode namenode=cluster.getNameNode();
final DatanodeManager dm=namenode.getNamesystem().getBlockManager().getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize=DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f="/foo";
// Write a small file with 3 replicas.
final Path p=new Path(f);
final FSDataOutputStream out=dfs.create(p,(short)3);
out.write(1);
out.close();
final LocatedBlocks locatedblocks=NameNodeAdapter.getBlockLocations(namenode,f,0,1);
// Restore the generic element type (the raw List required unchecked use).
final List<LocatedBlock> lb=locatedblocks.getLocatedBlocks();
Assert.assertEquals(1,lb.size());
final DatanodeInfo[] locations=lb.get(0).getLocations();
Assert.assertEquals(3,locations.length);
// Grow the comma-separated exclude list one replica at a time.
// StringBuilder replaces StringBuffer: no synchronization is needed here.
StringBuilder sb=new StringBuilder();
for (int i=0; i < 2; i++) {
sb.append(locations[i].getXferAddr());
{
// An excluded datanode must never be chosen for GETFILECHECKSUM...
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.GETFILECHECKSUM,-1L,blocksize,sb.toString());
for (int j=0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
}
}
{
// ...nor for OPEN...
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.OPEN,0,blocksize,sb.toString());
for (int j=0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
}
}
{
// ...nor for APPEND.
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PostOpParam.Op.APPEND,-1L,blocksize,sb.toString());
for (int j=0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
}
}
sb.append(",");
}
}
finally {
cluster.shutdown();
}
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed, unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf=getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,getFencerTrueCommand());
tool.setConf(conf);
// With auto-HA on, plain transition commands are refused with exit -1...
assertEquals(-1,runTool("-transitionToActive","nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
assertEquals(-1,runTool("-transitionToStandby","nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
// ...and the transition RPCs are never issued to the namenode.
Mockito.verify(mockProtocol,Mockito.never()).transitionToActive(anyReqInfo());
Mockito.verify(mockProtocol,Mockito.never()).transitionToStandby(anyReqInfo());
// With -forcemanual (plus the confirmation prompt answered on stdin)
// both transitions succeed.
setupConfirmationOnSystemIn();
assertEquals(0,runTool("-transitionToActive","-forcemanual","nn1"));
setupConfirmationOnSystemIn();
assertEquals(0,runTool("-transitionToStandby","-forcemanual","nn1"));
Mockito.verify(mockProtocol,Mockito.times(1)).transitionToActive(reqInfoCaptor.capture());
Mockito.verify(mockProtocol,Mockito.times(1)).transitionToStandby(reqInfoCaptor.capture());
// Forced manual requests must be tagged REQUEST_BY_USER_FORCED.
for ( StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
assertEquals(RequestSource.REQUEST_BY_USER_FORCED,ri.getSource());
}
}
EqualityVerifier
// Verifies -checkHealth: exit 0 while the NN is healthy, exit -1 with the
// failure message surfaced once monitorHealth() starts throwing.
@Test public void testCheckHealth() throws Exception {
assertEquals(0,runTool("-checkHealth","nn1"));
Mockito.verify(mockProtocol).monitorHealth();
// Make subsequent health checks fail, and expect the tool to report it.
Mockito.doThrow(new HealthCheckFailedException("fake health check failure")).when(mockProtocol).monitorHealth();
assertEquals(-1,runTool("-checkHealth","nn1"));
assertOutputContains("Health check failed: fake health check failure");
}
EqualityVerifier
/**
 * Test that the fencing configuration can be overridden per-nameservice
 * or per-namenode. The most specific key (per-namenode) takes precedence
 * over the per-nameservice key, which in turn overrides the global key.
 */
@Test public void testFencingConfigPerNameNode() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
final String nsSpecificKey=DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
final String nnSpecificKey=nsSpecificKey + ".nn1";
HdfsConfiguration conf=getHAConf();
// Global fencer succeeds -> failover succeeds.
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0,runTool("-failover","nn1","nn2","--forcefence"));
// A failing per-namenode fencer overrides the global one -> failover fails.
conf.set(nnSpecificKey,getFencerFalseCommand());
tool.setConf(conf);
assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
conf.unset(nnSpecificKey);
// A failing per-nameservice fencer also overrides the global one.
conf.set(nsSpecificKey,getFencerFalseCommand());
tool.setConf(conf);
assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
// And a succeeding per-nameservice fencer restores success.
conf.set(nsSpecificKey,getFencerTrueCommand());
tool.setConf(conf);
assertEquals(0,runTool("-failover","nn1","nn2","--forcefence"));
}
EqualityVerifier
/**
 * Test that, even if automatic HA is enabled, the monitoring operations
 * (read-only: -checkHealth, -getServiceState) still function correctly
 * and do not require the -forcemanual flag.
 */
@Test public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
HdfsConfiguration conf=getHAConf();
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true);
tool.setConf(conf);
// Both monitoring commands must succeed and hit the expected RPCs.
assertEquals(0,runTool("-checkHealth","nn1"));
Mockito.verify(mockProtocol).monitorHealth();
assertEquals(0,runTool("-getServiceState","nn1"));
Mockito.verify(mockProtocol).getServiceStatus();
}
EqualityVerifier
// Both the global help listing and per-command help must exit cleanly,
// and the per-command text must actually describe the command.
@Test public void testHelp() throws Exception {
  assertEquals(0, runTool("-help"));
  assertEquals(0, runTool("-help", "transitionToActive"));
  assertOutputContains("Transitions the service into Active");
}
BooleanVerifierEqualityVerifierHybridVerifier
// A NameNode sitting in safe mode is not ready to become active, so a
// failover targeting it must be rejected with a descriptive error.
@Test public void testTryFailoverToSafeMode() throws Exception {
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
tool.setConf(conf);
// Force nn1 into safe mode, then attempt to fail over to it.
NameNodeAdapter.enterSafeMode(cluster.getNameNode(0),false);
assertEquals(-1,runTool("-failover","nn2","nn1"));
assertTrue("Bad output: " + errOutput,errOutput.contains("is not ready to become active: " + "The NameNode is in safemode"));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the OfflineEditsViewer: generate an edits file containing every op
 * code, convert binary -> XML -> binary, and verify the round trip is
 * lossless (modulo trailing zero padding).
 */
@Test public void testGenerated() throws IOException {
String edits=nnHelper.generateEdits();
LOG.info("Generated edits=" + edits);
String editsParsedXml=folder.newFile("editsParsed.xml").getAbsolutePath();
String editsReparsed=folder.newFile("editsParsed").getAbsolutePath();
// Binary -> XML, then XML back to binary.
assertEquals(0,runOev(edits,editsParsedXml,"xml",false));
assertEquals(0,runOev(editsParsedXml,editsReparsed,"binary",false));
// The generated file must exercise every edit-log op code.
assertTrue("Edits " + edits + " should have all op codes",hasAllOpCodes(edits));
LOG.info("Comparing generated file " + editsReparsed + " with reference file "+ edits);
assertTrue("Generated edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(edits,editsReparsed));
}
BooleanVerifierEqualityVerifierHybridVerifier
// Round-trips the checked-in reference edits file (binary -> XML -> binary)
// through the OfflineEditsViewer and compares every stage against the
// stored reference artifacts.
@Test public void testStored() throws IOException {
  // All reference fixtures live under the test cache directory.
  final String cacheDir = System.getProperty("test.cache.data", "build/test/cache");
  final String binaryEdits = cacheDir + "/editsStored";
  final String parsedXml = cacheDir + "/editsStoredParsed.xml";
  final String reparsedBinary = cacheDir + "/editsStoredReparsed";
  final String referenceXml = cacheDir + "/editsStored.xml";
  // Convert binary -> XML, then XML -> binary.
  assertEquals(0, runOev(binaryEdits, parsedXml, "xml", false));
  assertEquals(0, runOev(parsedXml, reparsedBinary, "binary", false));
  // The reference file must exercise every edit-log op code.
  assertTrue("Edits " + binaryEdits + " should have all op codes", hasAllOpCodes(binaryEdits));
  // The freshly-parsed XML must match the checked-in reference XML.
  assertTrue("Reference XML edits and parsed to XML should be same",
      FileUtils.contentEqualsIgnoreEOL(new File(referenceXml), new File(parsedXml), "UTF-8"));
  // And the re-binarized file must match the original (ignoring padding).
  assertTrue("Reference edits and reparsed (bin to XML to bin) should be same",
      filesEqualIgnoreTrailingZeros(binaryEdits, reparsedBinary));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test overwriting an existing file: the destination must keep its old
 * contents (here: empty) until close() atomically swaps in the new data.
 */
@Test public void testOverwriteFile() throws IOException {
assertTrue("Creating empty dst file",DST_FILE.createNewFile());
OutputStream fos=new AtomicFileOutputStream(DST_FILE);
assertTrue("Empty file still exists",DST_FILE.exists());
fos.write(TEST_STRING.getBytes());
fos.flush();
// Even after a flush, the destination must still show the OLD (empty) content.
assertEquals("",DFSTestUtil.readFile(DST_FILE));
fos.close();
// Only close() makes the new content visible at the destination path.
String readBackData=DFSTestUtil.readFile(DST_FILE);
assertEquals(TEST_STRING,readBackData);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test case where there is no existing file: the destination must not
 * appear at all until the stream is closed, at which point the temp file
 * is atomically renamed into place with the full contents.
 */
@Test public void testWriteNewFile() throws IOException {
  final OutputStream out = new AtomicFileOutputStream(DST_FILE);
  // Nothing visible at the destination while the stream is open...
  assertFalse(DST_FILE.exists());
  out.write(TEST_STRING.getBytes());
  out.flush();
  // ...not even after a flush.
  assertFalse(DST_FILE.exists());
  out.close();
  // close() performs the atomic rename; the file now exists with the data.
  assertTrue(DST_FILE.exists());
  final String contents = DFSTestUtil.readFile(DST_FILE);
  assertEquals(TEST_STRING, contents);
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Test case where the flush() fails at close time - make sure
 * that we clean up after ourselves and don't touch any
 * existing file at the destination
 */
@Test public void testFailToFlush() throws IOException {
// Seed the destination with known old content.
FileOutputStream fos=new FileOutputStream(DST_FILE);
fos.write(TEST_STRING_2.getBytes());
fos.close();
// Stream whose flush() is rigged to fail on close.
OutputStream failingStream=createFailingStream();
failingStream.write(TEST_STRING.getBytes());
try {
failingStream.close();
fail("Close didn't throw exception");
}
catch ( IOException ioe) {
// expected: the rigged flush failure propagates out of close()
}
// The old destination content must be untouched...
assertEquals(TEST_STRING_2,DFSTestUtil.readFile(DST_FILE));
// ...and the temporary file must have been removed from the directory.
assertEquals("Temporary file should have been cleaned up",DST_FILE.getName(),Joiner.on(",").join(TEST_DIR.list()));
}
UtilityVerifierEqualityVerifierHybridVerifier
// The stream claims 5 bytes but the source only holds 2: the first read
// drains what is available, and any further read must raise EOF rather
// than return -1 or block.
@Test public void testReadArrayNotEnough() throws IOException {
  final ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  final byte[] buffer = new byte[10];
  assertEquals(2, in.read(buffer, 0, 5));
  try {
    in.read(buffer, 2, 3);
    fail("Read buf when should be out of data");
  } catch (EOFException expected) {
    // underlying data exhausted before the declared size - as intended
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
// Skipping past the available data: skip() reports only the bytes actually
// skipped, and a subsequent skip beyond the end must raise EOF.
@Test public void testSkipNotEnough() throws IOException {
  final ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  // Only 2 of the requested 3 bytes exist.
  assertEquals(2, in.skip(3));
  try {
    in.skip(1);
    fail("Skip when should be out of data");
  } catch (EOFException expected) {
    // data exhausted before the declared size - as intended
  }
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Basic single-element behavior: size, emptiness, and iteration.
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  set.add(list.get(0));
  // Exactly one element, set no longer empty.
  assertEquals(1, set.size());
  assertFalse(set.isEmpty());
  // The iterator must yield that one element and nothing more.
  final Iterator it = set.iterator();
  assertTrue(it.hasNext());
  assertEquals(list.get(0), it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
// Multi-element behavior: every first insert succeeds, duplicates are
// rejected, membership is stable, and iteration visits each element once.
@Test public void testMultiBasic(){
  LOG.info("Test multi element basic");
  // First insertion of every element succeeds.
  for (Integer e : list) {
    assertTrue(set.add(e));
  }
  assertEquals(list.size(), set.size());
  for (Integer e : list) {
    assertTrue(set.contains(e));
  }
  // Duplicate insertions are rejected...
  for (Integer e : list) {
    assertFalse(set.add(e));
  }
  // ...and membership is unaffected by the rejected inserts.
  for (Integer e : list) {
    assertTrue(set.contains(e));
  }
  // Iteration yields non-null elements drawn from the list, one per element.
  int seen = 0;
  for (Iterator it = set.iterator(); it.hasNext(); ) {
    Integer element = (Integer) it.next();
    assertNotNull(element);
    assertTrue(list.contains(element));
    seen++;
  }
  assertEquals(list.size(), seen);
  LOG.info("Test multi element basic - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Multi-element behavior for the ordered set variant: inserts, duplicate
// rejection, membership, and - crucially - iteration in insertion order.
@Test public void testMultiBasic(){
  LOG.info("Test multi element basic");
  for (Integer e : list) {
    assertTrue(set.add(e));
  }
  assertEquals(list.size(), set.size());
  for (Integer e : list) {
    assertTrue(set.contains(e));
  }
  // Duplicates must be rejected without disturbing membership.
  for (Integer e : list) {
    assertFalse(set.add(e));
  }
  for (Integer e : list) {
    assertTrue(set.contains(e));
  }
  // Iteration order must exactly match insertion (list) order.
  int pos = 0;
  for (Iterator it = set.iterator(); it.hasNext(); pos++) {
    assertEquals(list.get(pos), it.next());
  }
  assertEquals(list.size(), pos);
  LOG.info("Test multi element basic - DONE");
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test(timeout=60000) public void testBookmarkAdvancesOnRemoveOfSameElement(){
LOG.info("Test that the bookmark advances if we remove its element.");
set.add(list.get(0));
set.add(list.get(1));
set.add(list.get(2));
// Consume element 0; the bookmark now points at element 1.
Iterator it=set.getBookmark();
assertEquals(it.next(),list.get(0));
// Removing the bookmarked element must advance the bookmark past it...
set.remove(list.get(1));
it=set.getBookmark();
// ...so the next element served is element 2, not the removed one.
assertEquals(it.next(),list.get(2));
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
// pollFirst on a one-element set: returns the element, then null forever.
@Test public void testPollOneElement(){
  LOG.info("Test poll one element");
  set.add(list.get(0));
  assertEquals(list.get(0), set.pollFirst());
  // The set is now empty, so polling again yields null.
  assertNull(set.pollFirst());
  LOG.info("Test poll one element - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Drain the entire set via pollFirst and verify nothing remains.
@Test public void testPollAll(){
  LOG.info("Test poll all");
  for (Integer e : list) {
    assertTrue(set.add(e));
  }
  // Poll until exhausted.
  for (Object polled = set.pollFirst(); polled != null; polled = set.pollFirst()) {
    // intentionally empty - just draining the set
  }
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  // No element may survive the drain.
  for (int i = 0; i < NUM; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  // A fresh iterator over the drained set has nothing to yield.
  assertFalse(set.iterator().hasNext());
  LOG.info("Test poll all - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify the toArray conversions: both the typed toArray(T[]) and the
 * untyped toArray() must return every element exactly once, and neither
 * call may modify the set.
 */
@Test public void testOther(){
  LOG.info("Test other");
  assertTrue(set.addAll(list));
  // Typed variant: all NUM elements present, all drawn from the list.
  Integer[] array=set.toArray(new Integer[0]);
  assertEquals(NUM,array.length);
  for (int i=0; i < array.length; i++) {
    assertTrue(list.contains(array[i]));
  }
  // toArray must not have consumed or shrunk the set.
  assertEquals(NUM,set.size());
  // Untyped variant must agree with the typed one.
  Object[] array2=set.toArray();
  assertEquals(NUM,array2.length);
  for (int i=0; i < array2.length; i++) {
    assertTrue(list.contains(array2[i]));
  }
  // Fixed: completion banner previously said "Test capacity - DONE",
  // which did not match the "Test other" banner this test opens with.
  LOG.info("Test other - DONE");
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Single-element sanity check: size one, not empty, iterator yields
// exactly the added element.
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertEquals(1, set.size());
  assertFalse(set.isEmpty());
  Iterator elements = set.iterator();
  assertTrue(elements.hasNext());
  assertEquals(list.get(0), elements.next());
  // Nothing beyond the single element.
  assertFalse(elements.hasNext());
  LOG.info("Test one element basic - DONE");
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// pollN returns elements in insertion order; asking for more than remain
// returns just the remainder; the two polls together drain the set.
@Test public void testPollNMulti(){
  LOG.info("Test pollN multi");
  set.addAll(list);
  // First poll: exactly the first 10 elements, in order.
  final List first = set.pollN(10);
  assertEquals(10, first.size());
  for (int i = 0; i < 10; i++) {
    assertEquals(list.get(i), first.get(i));
  }
  // Over-sized poll: returns only what is left, still in order.
  final List rest = set.pollN(1000);
  assertEquals(NUM - 10, rest.size());
  for (int i = 10; i < NUM; i++) {
    assertEquals(list.get(i), rest.get(i - 10));
  }
  // Everything has been polled out.
  assertTrue(set.isEmpty());
  assertEquals(0, set.size());
  LOG.info("Test pollN multi - DONE");
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect connect timeout, because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testConnectTimeout() throws Exception {
// Fill the server's accept backlog so new connections cannot complete.
consumeConnectionBacklog();
try {
fs.listFiles(new Path("/"),false);
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
// The JDK reports a connect-phase timeout with this exact message.
assertEquals("connect timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * On the second step of two-step write, expect read timeout accessing the
 * redirect location, because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteReadTimeout() throws Exception {
// Serve exactly one 307 redirect, then go silent on the redirected request.
startSingleTemporaryRedirectResponseThread(false);
OutputStream os=null;
try {
os=fs.create(new Path("/file"));
os.close();
// Null out so the finally-cleanup does not double-close on success path.
os=null;
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
// Read-phase timeout message from the JDK.
assertEquals("Read timed out",e.getMessage());
}
finally {
IOUtils.cleanup(LOG,os);
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect read timeout, because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testReadTimeout() throws Exception {
try {
fs.listFiles(new Path("/"),false);
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
// Read-phase timeout message from the JDK.
assertEquals("Read timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * After a redirect, expect read timeout accessing the redirect location,
 * because the bogus server never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testRedirectReadTimeout() throws Exception {
// Serve one 307 redirect, then never answer the follow-up request.
startSingleTemporaryRedirectResponseThread(false);
try {
fs.getFileChecksum(new Path("/file"));
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * After a redirect, expect connect timeout accessing the redirect location,
 * because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testRedirectConnectTimeout() throws Exception {
// Serve one 307 redirect; 'true' also consumes the backlog at the target.
startSingleTemporaryRedirectResponseThread(true);
try {
fs.getFileChecksum(new Path("/file"));
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * On the second step of two-step write, expect connect timeout accessing the
 * redirect location, because the connection backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteConnectTimeout() throws Exception {
// One 307 redirect, with the redirect target's backlog consumed.
startSingleTemporaryRedirectResponseThread(true);
OutputStream os=null;
try {
os=fs.create(new Path("/file"));
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
finally {
// Best-effort close of a possibly half-open stream.
IOUtils.cleanup(LOG,os);
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect read timeout on a URL that requires auth, because the bogus server
 * never sends a reply.
 */
@Test(timeout=TEST_TIMEOUT) public void testAuthUrlReadTimeout() throws Exception {
try {
// getDelegationToken goes through the authenticated URL code path.
fs.getDelegationToken("renewer");
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("Read timed out",e.getMessage());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Expect connect timeout on a URL that requires auth, because the connection
 * backlog is consumed.
 */
@Test(timeout=TEST_TIMEOUT) public void testAuthUrlConnectTimeout() throws Exception {
consumeConnectionBacklog();
try {
// getDelegationToken goes through the authenticated URL code path.
fs.getDelegationToken("renewer");
fail("expected timeout");
}
catch ( SocketTimeoutException e) {
assertEquals("connect timed out",e.getMessage());
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
// Exercises WebHDFS create/read/append/read against each configured
// endpoint (webhdfs[i]), verifying redirects deliver the right data:
// write, check length, read byte-by-byte, append, re-check length, and
// finally verify the concatenated write+append contents.
@Test public void testRedirect() throws Exception {
final String dir="/testRedirect/";
final String filename="file";
final Path p=new Path(dir,filename);
// One distinct payload per endpoint, for both the write and the append.
final String[] writeStrings=createStrings("write to webhdfs ","write");
final String[] appendStrings=createStrings("append to webhdfs ","append");
// Write each endpoint's payload.
for (int i=0; i < webhdfs.length; i++) {
final FSDataOutputStream out=webhdfs[i].create(p);
out.write(writeStrings[i].getBytes());
out.close();
}
// File length must match the written payload.
for (int i=0; i < webhdfs.length; i++) {
final long expected=writeStrings[i].length();
Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen());
}
// Read back byte-by-byte and compare against the payload.
for (int i=0; i < webhdfs.length; i++) {
final FSDataInputStream in=webhdfs[i].open(p);
for (int c, j=0; (c=in.read()) != -1; j++) {
Assert.assertEquals(writeStrings[i].charAt(j),c);
}
in.close();
}
// Append each endpoint's second payload.
for (int i=0; i < webhdfs.length; i++) {
final FSDataOutputStream out=webhdfs[i].append(p);
out.write(appendStrings[i].getBytes());
out.close();
}
// Length is now write + append.
for (int i=0; i < webhdfs.length; i++) {
final long expected=writeStrings[i].length() + appendStrings[i].length();
Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen());
}
// Full contents must be the write payload followed by the append payload.
for (int i=0; i < webhdfs.length; i++) {
final StringBuilder b=new StringBuilder();
final FSDataInputStream in=webhdfs[i].open(p);
for (int c; (c=in.read()) != -1; ) {
b.append((char)c);
}
final int wlen=writeStrings[i].length();
Assert.assertEquals(writeStrings[i],b.substring(0,wlen));
Assert.assertEquals(appendStrings[i],b.substring(wlen));
in.close();
}
}
APIUtilityVerifierEqualityVerifier
@Test public void testToSortedStringEscapesURICharacters(){
final String sep="&";
Param,?> ampParam=new TokenArgumentParam("token&ersand");
Param,?> equalParam=new RenewerParam("renewer=equal");
final String expected="&renewer=renewer%3Dequal&token=token%26ampersand";
final String actual=Param.toSortedString(sep,equalParam,ampParam);
Assert.assertEquals(expected,actual);
}
EqualityVerifier
// The default renewer must be represented as a null value.
@Test public void testRenewerParam(){
  final RenewerParam defaultParam = new RenewerParam(RenewerParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
}
EqualityVerifier
// The xattr name must round-trip through the param unchanged.
@Test public void testXAttrNameParam(){
  final XAttrNameParam p=new XAttrNameParam("user.a1");
  // Fixed argument order: JUnit expects (expected, actual) so failure
  // messages read correctly; also matches the rest of this file.
  Assert.assertEquals("user.a1",p.getXAttrName());
}
UtilityVerifierEqualityVerifierHybridVerifier
// Validates AclPermissionParam parsing: well-formed ACL specs are accepted
// and match AclEntry.parseAclSpec; malformed specs (bad perm strings,
// missing colons, unknown entry types) must throw IllegalArgumentException.
@Test public void testAclPermissionParam(){
final AclPermissionParam p=new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx");
List setAclList=AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",true);
// The param's parsed ACL must agree with AclEntry's own parser.
Assert.assertEquals(setAclList.toString(),p.getAclPermission(true).toString());
// Valid spec: accepted without exception.
new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
try {
// Invalid: permission strings with 4 characters ("rw--", "rwx-").
new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
// Valid spec including mask and default entries.
new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx");
try {
// Invalid: entries missing the second ':' separator.
new AclPermissionParam("user:r-,group:rwx,other:rw-");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
// Invalid: malformed default entry and bad permission string.
new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
// Invalid: unknown entry type "temp" plus malformed entries.
new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
UtilityVerifierEqualityVerifierHybridVerifier
// Default modification time is -1 ("unset"); -1 is the minimum legal
// value and anything below it must be rejected.
@Test public void testModificationTimeParam(){
  final ModificationTimeParam defaultParam = new ModificationTimeParam(ModificationTimeParam.DEFAULT);
  Assert.assertEquals(-1L, defaultParam.getValue().longValue());
  // -1 is explicitly permitted.
  new ModificationTimeParam(-1L);
  // Below -1 must throw.
  try {
    new ModificationTimeParam(-2L);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Default replication is null; with a Configuration it falls back to the
// configured dfs.replication. Replication below 1 must be rejected.
@Test public void testReplicationParam(){
  final ReplicationParam defaultParam = new ReplicationParam(ReplicationParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
  // The conf-aware getter resolves the default from the configuration.
  Assert.assertEquals((short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,DFSConfigKeys.DFS_REPLICATION_DEFAULT), defaultParam.getValue(conf));
  // 1 is the minimum legal replication.
  new ReplicationParam((short)1);
  try {
    new ReplicationParam((short)0);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
APIUtilityVerifierIterativeVerifierEqualityVerifier
// ConcatSourcesParam must serialize 0, 1, 2 and 3 source paths as a
// comma-joined string identical to joining the raw path strings.
@Test public void testConcatSourcesParam(){
final String[] strings={"/","/foo","/bar"};
for (int n=0; n < strings.length; n++) {
final String[] sub=new String[n];
final Path[] paths=new Path[n];
// Populates sub[i] and paths[i] in lockstep from the same string.
for (int i=0; i < paths.length; i++) {
paths[i]=new Path(sub[i]=strings[i]);
}
final String expected=StringUtils.join(",",Arrays.asList(sub));
final ConcatSourcesParam computed=new ConcatSourcesParam(paths);
Assert.assertEquals(expected,computed.getValue());
}
}
EqualityVerifier
// The default group must be represented as a null value.
@Test public void testGroupParam(){
  final GroupParam defaultParam = new GroupParam(GroupParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Default buffer size is null; with a Configuration it resolves to the
// configured io.file.buffer.size. Sizes below 1 must be rejected.
@Test public void testBufferSizeParam(){
  final BufferSizeParam defaultParam = new BufferSizeParam(BufferSizeParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
  // The conf-aware getter resolves the default from the configuration.
  Assert.assertEquals(conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT), defaultParam.getValue(conf));
  // 1 is the minimum legal buffer size.
  new BufferSizeParam(1);
  try {
    new BufferSizeParam(0);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
// Default is false; boolean parsing is case-insensitive ("falSe" is
// accepted) but arbitrary strings must be rejected.
@Test public void testRecursiveParam(){
  final RecursiveParam defaultParam = new RecursiveParam(RecursiveParam.DEFAULT);
  Assert.assertEquals(false, defaultParam.getValue());
  // Mixed case is tolerated.
  new RecursiveParam("falSe");
  try {
    new RecursiveParam("abc");
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
// Default is false; boolean parsing is case-insensitive ("trUe" is
// accepted) but arbitrary strings must be rejected.
@Test public void testOverwriteParam(){
  final OverwriteParam defaultParam = new OverwriteParam(OverwriteParam.DEFAULT);
  Assert.assertEquals(false, defaultParam.getValue());
  // Mixed case is tolerated.
  new OverwriteParam("trUe");
  try {
    new OverwriteParam("abc");
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
InternalCallVerifierEqualityVerifier
// Serializing a rename-option set and re-parsing the string form must
// reproduce the original EnumSet.
@Test public void testRenameOptionSetParam(){
  final RenameOptionSetParam p=new RenameOptionSetParam(Options.Rename.OVERWRITE,Options.Rename.NONE);
  final RenameOptionSetParam p1=new RenameOptionSetParam(p.getValueString());
  // Fixed argument order: JUnit expects (expected, actual) so failure
  // messages read correctly; also matches the rest of this file.
  Assert.assertEquals(EnumSet.of(Options.Rename.OVERWRITE,Options.Rename.NONE),p1.getValue());
}
InternalCallVerifierEqualityVerifier
// The xattr encoding must round-trip: both the directly-constructed param
// and one re-parsed from its string form must report BASE64.
@Test public void testXAttrEncodingParam(){
  final XAttrEncodingParam p=new XAttrEncodingParam(XAttrCodec.BASE64);
  // Fixed argument order: JUnit expects (expected, actual).
  Assert.assertEquals(XAttrCodec.BASE64,p.getEncoding());
  final XAttrEncodingParam p1=new XAttrEncodingParam(p.getValueString());
  Assert.assertEquals(XAttrCodec.BASE64,p1.getEncoding());
}
EqualityVerifier
// Both the old and new snapshot-name params must pass the raw name through.
@Test public void testSnapshotNameParam(){
  final OldSnapshotNameParam oldName = new OldSnapshotNameParam("s1");
  final SnapshotNameParam newName = new SnapshotNameParam("s2");
  Assert.assertEquals("s1", oldName.getValue());
  Assert.assertEquals("s2", newName.getValue());
}
UtilityVerifierEqualityVerifierHybridVerifier
// Default destination is null; absolute paths are accepted while
// relative paths must be rejected.
@Test public void testDestinationParam(){
  final DestinationParam defaultParam = new DestinationParam(DestinationParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
  // Absolute path: accepted.
  new DestinationParam("/abc");
  // Relative path: rejected.
  try {
    new DestinationParam("abc");
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
// PermissionParam parsing: the default is 0755; values are octal strings
// in [0, 1777]; negatives, values above 1777, non-octal digits and
// non-numeric input must all be rejected.
@Test public void testPermissionParam(){
final PermissionParam p=new PermissionParam(PermissionParam.DEFAULT);
// The default permission resolves to 0755.
Assert.assertEquals(new FsPermission((short)0755),p.getFsPermission());
// Lower bound: "0" is accepted.
new PermissionParam("0");
try {
// Negative values are rejected.
new PermissionParam("-1");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
// Upper bound: "1777" (sticky bit + rwxrwxrwx) is accepted.
new PermissionParam("1777");
try {
// Anything above 1777 is rejected.
new PermissionParam("2000");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
// '8' is not an octal digit.
new PermissionParam("8");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
try {
// Non-numeric input is rejected.
new PermissionParam("abc");
Assert.fail();
}
catch ( IllegalArgumentException e) {
LOG.info("EXPECTED: " + e);
}
}
EqualityVerifier
// A hex-encoded xattr value must decode to the same bytes XAttrCodec
// produces for that input.
@Test public void testXAttrValueParam() throws IOException {
  final XAttrValueParam p=new XAttrValueParam("0x313233");
  // Fixed argument order: JUnit expects (expected, actual).
  Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313233"),p.getXAttrValue());
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Default block size is null; with a Configuration it resolves to the
// configured dfs.blocksize. Sizes below 1 must be rejected.
@Test public void testBlockSizeParam(){
  final BlockSizeParam defaultParam = new BlockSizeParam(BlockSizeParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
  // The conf-aware getter resolves the default from the configuration.
  Assert.assertEquals(conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT), defaultParam.getValue(conf));
  // 1 byte is the minimum legal block size.
  new BlockSizeParam(1L);
  try {
    new BlockSizeParam(0L);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
InternalCallVerifierEqualityVerifier
// The xattr set-flag set must round-trip: both the directly-constructed
// param and one re-parsed from its string form must report the same flags.
@Test public void testXAttrSetFlagParam(){
  EnumSet flag=EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE);
  final XAttrSetFlagParam p=new XAttrSetFlagParam(flag);
  // Fixed argument order: JUnit expects (expected, actual).
  Assert.assertEquals(flag,p.getFlag());
  final XAttrSetFlagParam p1=new XAttrSetFlagParam(p.getValueString());
  Assert.assertEquals(flag,p1.getFlag());
}
UtilityVerifierEqualityVerifierHybridVerifier
// Default access time is -1 ("unset"); -1 is the minimum legal value
// and anything below it must be rejected.
@Test public void testAccessTimeParam(){
  final AccessTimeParam defaultParam = new AccessTimeParam(AccessTimeParam.DEFAULT);
  Assert.assertEquals(-1L, defaultParam.getValue().longValue());
  // -1 is explicitly permitted.
  new AccessTimeParam(-1L);
  // Below -1 must throw.
  try {
    new AccessTimeParam(-2L);
    Assert.fail();
  } catch (IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
EqualityVerifier
// The default delegation token must be represented as a null value.
@Test public void testDelegationParam(){
  final DelegationParam defaultParam = new DelegationParam(DelegationParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
}
EqualityVerifier
// The default owner must be represented as a null value.
@Test public void testOwnerParam(){
  final OwnerParam defaultParam = new OwnerParam(OwnerParam.DEFAULT);
  Assert.assertNull(defaultParam.getValue());
}
InternalCallVerifierEqualityVerifier
// The quoter must pass a null parameter list through untouched rather
// than substituting an empty array.
@Test public void testRequestQuoterWithNull() throws Exception {
  final HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  // The wrapped request reports no values for "dummy".
  Mockito.doReturn(null).when(request).getParameterValues("dummy");
  final RequestQuoter requestQuoter = new RequestQuoter(request);
  final String[] parameterValues = requestQuoter.getParameterValues("dummy");
  Assert.assertNull("It should return null " + "when there are no values for the parameter", parameterValues);
}
APIUtilityVerifierEqualityVerifier
/**
 * Test that verifies headers can be up to 64K long.
 * The test adds a 63K header leaving 1K for other headers.
 * This is because the header buffer setting is for ALL headers,
 * names and values included.
 */
@Test public void testLongHeader() throws Exception {
URL url=new URL(baseUrl,"/longheader");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
// Build a 63K ('a' * 63 * 1024) header value.
StringBuilder sb=new StringBuilder();
for (int i=0; i < 63 * 1024; i++) {
sb.append("a");
}
conn.setRequestProperty("longheader",sb.toString());
// The server must accept the oversized header rather than reject the request.
assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
}
BooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test the maximum number of threads cannot be exceeded.
 */
@Test public void testMaxThreads() throws Exception {
int clientThreads=MAX_THREADS * 10;
Executor executor=Executors.newFixedThreadPool(clientThreads);
// ready: all clients in position; start: released simultaneously to
// maximize concurrent load on the server.
final CountDownLatch ready=new CountDownLatch(clientThreads);
final CountDownLatch start=new CountDownLatch(1);
for (int i=0; i < clientThreads; i++) {
executor.execute(new Runnable(){
@Override public void run(){
ready.countDown();
try {
start.await();
assertEquals("a:b\nc:d\n",readOutput(new URL(baseUrl,"/echo?a=b&c=d")));
int serverThreads=server.webServer.getThreadPool().getThreads();
assertTrue("More threads are started than expected, Server Threads count: " + serverThreads,serverThreads <= MAX_THREADS);
System.out.println("Number of threads = " + serverThreads + " which is less or equal than the max = "+ MAX_THREADS);
}
catch ( Exception e) {
// NOTE(review): exceptions from worker threads are silently swallowed
// here, and the test method returns without awaiting the workers, so
// failed assertions in this Runnable cannot fail the test - confirm
// whether this is intentional best-effort behavior.
}
}
}
);
}
ready.await();
start.countDown();
}
EqualityVerifier
/**
 * Test the echo map servlet that uses getParameterMap.
 */
@Test public void testEchoMap() throws Exception {
assertEquals("a:b\nc:d\n",readOutput(new URL(baseUrl,"/echomap?a=b&c=d")));
// Repeated keys are joined with ',' and special chars (<, >) pass through.
assertEquals("a:b,>\nc<:d\n",readOutput(new URL(baseUrl,"/echomap?a=b&c<=d&a=>")));
}
InternalCallVerifierEqualityVerifier
/**
 * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics
 * servlets, when authentication filters are set, but authorization is not
 * enabled: every authenticated user must be able to reach every servlet.
 * @throws Exception
 */
@Test public void testDisabledAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf=new Configuration();
  // Install an authentication filter but do NOT enable admin authorization.
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,DummyFilterInitializer.class.getName());
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA",Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB",Arrays.asList("groupB"));
  HttpServer2 myServer=new HttpServer2.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,conf);
  myServer.start();
  // Fixed: tear the server down even if an assertion below fails, so a
  // failed run does not leak the listener for the rest of the test JVM.
  try {
    String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
    // With authorization disabled, both users reach all default servlets.
    for ( String servlet : new String[]{"conf","logs","stacks","logLevel","metrics"}) {
      for ( String user : new String[]{"userA","userB"}) {
        assertEquals(HttpURLConnection.HTTP_OK,getHttpStatusCode(serverURL + servlet,user));
      }
    }
  } finally {
    myServer.stop();
  }
}
APIUtilityVerifierEqualityVerifier
// Hits the Jersey resource and checks that both the path segment and the
// 'op' query parameter are echoed back in the JSON reply.
@Test public void testJersey() throws Exception {
  LOG.info("BEGIN testJersey()");
  final String json = readOutput(new URL(baseUrl, "/jersey/foo?op=bar"));
  final Map m = parse(json);
  LOG.info("m=" + m);
  assertEquals("foo", m.get(JerseyResource.PATH));
  assertEquals("bar", m.get(JerseyResource.OP));
  LOG.info("END testJersey()");
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test that the server is alive once started, and that its webapp context
 * attributes are cleared once it is stopped.
 * @throws Throwable on failure
 */
@Test public void testWepAppContextAfterServerStop() throws Throwable {
HttpServer2 server=null;
String key="test.attribute.key";
String value="test.attribute.value";
server=createTestServer();
assertNotLive(server);
server.start();
server.setAttribute(key,value);
// While running, the attribute set above must be retrievable.
assertAlive(server);
assertEquals(value,server.getAttribute(key));
stop(server);
// Stopping the server must clear the webapp context attributes.
assertNull("Server context should have cleared",server.getAttribute(key));
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Verifies the pre-compact (old) ObjectWritable encoding of an int[]:
// the array is labelled with its own class name, followed by a length and
// one boxed element per slot. The field 'i' is the source int[] fixture.
@Test public void testOldFormat() throws IOException {
ObjectWritable.writeObject(out,i,i.getClass(),null);
in.reset(out.getData(),out.getLength());
@SuppressWarnings("deprecation") String className=UTF8.readString(in);
assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not labelled as an array of int",i.getClass().getName(),className);
int length=in.readInt();
assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not expected length",i.length,length);
int[] readValue=new int[length];
try {
// Note: this loop variable 'i' shadows the int[] field 'i' used above.
for (int i=0; i < length; i++) {
readValue[i]=(int)((Integer)ObjectWritable.readObject(in,null));
}
}
catch ( Exception e) {
fail("The int[] written by ObjectWritable as a non-compact array " + "was corrupted. Failed to correctly read int[] of length " + length + ". Got exception:\n"+ StringUtils.stringifyException(e));
}
assertTrue("The int[] written by ObjectWritable as a non-compact array " + "was corrupted.",Arrays.equals(i,readValue));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify how ObjectWritable labels an int[]: with compact encoding it is
 * written as an ArrayPrimitiveWritable.Internal, while an explicit
 * ArrayPrimitiveWritable is labelled with its own class name as both the
 * declared and actual class. Both encodings must round-trip the contents.
 */
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException {
  // Write the bare int[] (compact) followed by a wrapped writable.
  ObjectWritable.writeObject(out,i,i.getClass(),null,true);
  ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i);
  ObjectWritable.writeObject(out,apw,apw.getClass(),null,true);
  in.reset(out.getData(),out.getLength());
  // The compact int[] must be labelled as the Internal helper class.
  String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className);
  ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal();
  apwi.readFields(in);
  // Fixed: assert on the deserialized instance (apwi), not the writer-side
  // apw whose component type is trivially correct by construction.
  assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apwi.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get())));
  // The wrapped writable carries two labels: declared class, then class.
  String declaredClassName=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName);
  className=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className);
  ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable();
  apw2.readFields(in);
  assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get())));
}
IterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Round-trips every array in bigSet twice (once via ObjectWritable, once via
 * ArrayPrimitiveWritable) and verifies component types and contents survive.
 */
@Test public void testMany() throws IOException {
// Serialize each source array both ways back to back.
for ( Object src : bigSet) {
ObjectWritable.writeObject(out,src,src.getClass(),null,true);
new ArrayPrimitiveWritable(src).write(out);
}
in.reset(out.getData(),out.getLength());
// Deserialize in the same interleaved order.
int idx=0;
while (idx < resultSet.length) {
resultSet[idx++]=ObjectWritable.readObject(in,null);
ArrayPrimitiveWritable reader=new ArrayPrimitiveWritable();
reader.readFields(in);
resultSet[idx++]=reader.get();
}
assertEquals(expectedResultSet.length,resultSet.length);
for (int j=0; j < resultSet.length; j++) {
assertEquals("ComponentType of array " + j,expectedResultSet[j].getClass().getComponentType(),resultSet[j].getClass().getComponentType());
}
assertTrue("In and Out arrays didn't match values",Arrays.deepEquals(expectedResultSet,resultSet));
}
InternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Exercises the zero-copy constructor and set() overloads of BytesWritable,
 * which let callers supply the backing buffer and a length directly.
 * (Originally written when those APIs were added.)
 */
@Test public void testZeroCopy(){
byte[] raw="brock".getBytes();
BytesWritable zeroCopy=new BytesWritable(raw,raw.length);
BytesWritable copied=new BytesWritable(raw);
// Zero-copy construction must retain the caller's array itself.
assertTrue("copy took place, backing array != array passed to constructor",raw == zeroCopy.getBytes());
assertTrue("length of BW should backing byte array",zeroCopy.getLength() == raw.length);
assertEquals("objects with same backing array should be equal",zeroCopy,copied);
assertEquals("string repr of objects with same backing array should be equal",zeroCopy.toString(),copied.toString());
assertTrue("compare order objects with same backing array should be equal",zeroCopy.compareTo(copied) == 0);
assertTrue("hash of objects with same backing array should be equal",zeroCopy.hashCode() == copied.hashCode());
// Re-point at a larger buffer, then back at the original contents, and
// confirm equality with the copy-constructed instance still holds.
byte[] bigger=new byte[raw.length * 5];
zeroCopy.set(bigger,0,bigger.length);
zeroCopy.set(raw,0,raw.length);
assertEquals("buffer created with (array, len) has bad contents",zeroCopy,copied);
assertTrue("buffer created with (array, len) has bad length",zeroCopy.getLength() == copied.getLength());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/** putAll must copy both the entries and the class-to-id bookkeeping maps. */
@Test(timeout=1000) public void testPutAll(){
SortedMapWritable source=new SortedMapWritable();
source.put(new Text("key"),new Text("value"));
SortedMapWritable target=new SortedMapWritable();
target.putAll(source);
assertEquals("map1 entries don't match map2 entries",source,target);
boolean classInfoCopied=target.classToIdMap.containsKey(Text.class) && target.idToClassMap.containsValue(Text.class);
assertTrue("map2 doesn't have class information from map1",classInfoCopied);
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Round-trips a SortedMapWritable through its copy constructor, both for a
 * flat map and for a map whose values are themselves SortedMapWritables.
 */
@Test @SuppressWarnings("unchecked") public void testSortedMapWritable(){
Text[] keys={new Text("key1"),new Text("key2"),new Text("key3")};
BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes()),new BytesWritable("value3".getBytes())};
SortedMapWritable inMap=new SortedMapWritable();
int idx=0;
for ( Text key : keys) {
inMap.put(key,values[idx++]);
}
// Sorted order: first/last must line up with the key array ends.
assertEquals(0,inMap.firstKey().compareTo(keys[0]));
assertEquals(0,inMap.lastKey().compareTo(keys[2]));
// Copy-construct and verify every entry survived.
SortedMapWritable outMap=new SortedMapWritable(inMap);
assertEquals(inMap.size(),outMap.size());
for ( Map.Entry entry : inMap.entrySet()) {
assertTrue(outMap.containsKey(entry.getKey()));
assertEquals(0,((WritableComparable)outMap.get(entry.getKey())).compareTo(entry.getValue()));
}
// Now nest maps as values and copy the outer map as well.
Text[] maps={new Text("map1"),new Text("map2")};
SortedMapWritable mapOfMaps=new SortedMapWritable();
mapOfMaps.put(maps[0],inMap);
mapOfMaps.put(maps[1],outMap);
SortedMapWritable copyOfMapOfMaps=new SortedMapWritable(mapOfMaps);
for ( Text mapKey : maps) {
assertTrue(copyOfMapOfMaps.containsKey(mapKey));
SortedMapWritable expected=(SortedMapWritable)mapOfMaps.get(mapKey);
SortedMapWritable actual=(SortedMapWritable)copyOfMapOfMaps.get(mapKey);
assertEquals(expected.size(),actual.size());
for ( Writable key : expected.keySet()) {
assertTrue(actual.containsKey(key));
WritableComparable expectedValue=(WritableComparable)expected.get(key);
WritableComparable actualValue=(WritableComparable)actual.get(key);
assertEquals(0,expectedValue.compareTo(actualValue));
}
}
}
InternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Checks that equals() and hashCode() honor their contracts: null handling,
 * symmetry, insertion-order independence, and sensitivity to content changes.
 */
@Test public void testEqualsAndHashCode(){
SortedMapWritable mapA=new SortedMapWritable();
SortedMapWritable mapB=new SortedMapWritable();
String failureReason="SortedMapWritable couldn't be initialized. Got null reference";
assertNotNull(failureReason,mapA);
assertNotNull(failureReason,mapB);
assertFalse("equals method returns true when passed null",mapA.equals(null));
assertTrue("Two empty SortedMapWritables are no longer equal",mapA.equals(mapB));
Text[] keys={new Text("key1"),new Text("key2")};
BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes())};
// Disjoint single entries: the maps must differ, both ways round.
mapA.put(keys[0],values[0]);
mapB.put(keys[1],values[1]);
failureReason="Two SortedMapWritables with different data are now equal";
assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason,!mapA.equals(mapB));
assertTrue(failureReason,!mapB.equals(mapA));
// Same entry sets built in opposite insertion order: must be equal.
mapA.put(keys[1],values[1]);
mapB.put(keys[0],values[0]);
failureReason="Two SortedMapWritables with same entry sets formed in different order are now different";
assertEquals(failureReason,mapA.hashCode(),mapB.hashCode());
assertTrue(failureReason,mapA.equals(mapB));
assertTrue(failureReason,mapB.equals(mapA));
// Swap the values under the same keys in mapA only: must differ again.
mapA.put(keys[0],values[1]);
mapA.put(keys[1],values[0]);
failureReason="Two SortedMapWritables with different content are now equal";
assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason,!mapA.equals(mapB));
assertTrue(failureReason,!mapB.equals(mapA));
}
InternalCallVerifierEqualityVerifier
/**
 * Test that number of "unknown" classes is propagated across multiple copies.
 */
@Test @SuppressWarnings("deprecation") public void testForeignClass(){
SortedMapWritable original=new SortedMapWritable();
original.put(new Text("key"),new UTF8("value"));
original.put(new Text("key2"),new UTF8("value2"));
// Copy twice; the foreign-class count must survive both hops.
SortedMapWritable firstCopy=new SortedMapWritable(original);
SortedMapWritable secondCopy=new SortedMapWritable(firstCopy);
assertEquals(1,secondCopy.getNewClasses());
}
Class: org.apache.hadoop.io.compress.TestCodec
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Writes a gzip file with java.util.zip, then reads it back through the
 * codec chosen by CompressionCodecFactory — with native zlib disabled, so
 * the pure-Java inflater path is exercised — and checks the payload.
 */
@Test public void testGzipCodecRead() throws IOException {
Configuration conf=new Configuration();
// Force the built-in (non-native) zlib implementation.
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,false);
assertFalse("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
Decompressor zlibDecompressor=ZlibFactory.getZlibDecompressor(conf);
assertNotNull("zlibDecompressor is null!",zlibDecompressor);
assertTrue("ZlibFactory returned unexpected inflator",zlibDecompressor instanceof BuiltInZlibInflater);
CodecPool.returnDecompressor(zlibDecompressor);
String tmpDir=System.getProperty("test.build.data","/tmp/");
Path f=new Path(new Path(tmpDir),"testGzipCodecRead.txt.gz");
final String msg="This is the message in the file!";
// Produce the fixture with the JDK's own gzip so the codec read path is
// tested independently of Hadoop's write path.
BufferedWriter bw=new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
try {
bw.write(msg);
}
finally {
bw.close();
}
CompressionCodecFactory ccf=new CompressionCodecFactory(conf);
CompressionCodec codec=ccf.getCodec(f);
Decompressor decompressor=CodecPool.getDecompressor(codec);
FileSystem fs=FileSystem.getLocal(conf);
BufferedReader br=null;
try {
InputStream is=fs.open(f);
is=codec.createInputStream(is,decompressor);
br=new BufferedReader(new InputStreamReader(is));
String line=br.readLine();
assertEquals("Didn't get the same message back!",msg,line);
}
finally {
// Fix: close the stream and return the pooled decompressor even when an
// assertion fails, instead of leaking both.
if (br != null) {
br.close();
}
CodecPool.returnDecompressor(decompressor);
}
}
UtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Opens a new file with O_WRONLY|O_CREAT, writes through the returned
 * descriptor, then verifies that O_EXCL on the now-existing file fails with
 * EEXIST. POSIX-only; skipped on Windows.
 */
@Test(timeout=30000) public void testOpenWithCreate() throws Exception {
if (Path.WINDOWS) {
return;
}
LOG.info("Test creating a file with O_CREAT");
FileDescriptor fd=NativeIO.POSIX.open(new File(TEST_DIR,"testWorkingOpen").getAbsolutePath(),NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT,0700);
// Fix: assert on the descriptor itself — assertNotNull(true) was vacuous.
assertNotNull(fd);
assertTrue(fd.valid());
FileOutputStream fos=new FileOutputStream(fd);
fos.write("foo".getBytes());
fos.close();
// Closing the stream invalidates the shared descriptor.
assertFalse(fd.valid());
LOG.info("Test exclusive create");
try {
fd=NativeIO.POSIX.open(new File(TEST_DIR,"testWorkingOpen").getAbsolutePath(),NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT | NativeIO.POSIX.O_EXCL,0700);
fail("Was able to create existing file with O_EXCL");
}
catch ( NativeIOException nioe) {
LOG.info("Got expected exception for failed exclusive create",nioe);
assertEquals(Errno.EEXIST,nioe.getErrno());
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 */
@Test(timeout=30000) public void testMultiThreadedFstat() throws Exception {
if (Path.WINDOWS) {
return;
}
final FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
// Fix: parameterize the raw AtomicReference/List so thrown.get() yields a
// Throwable (required by the rethrow at the bottom) without casts.
final AtomicReference<Throwable> thrown=new AtomicReference<Throwable>();
List<Thread> statters=new ArrayList<Thread>();
for (int i=0; i < 10; i++) {
Thread statter=new Thread(){
@Override public void run(){
// Hammer fstat on the shared descriptor for ~5 seconds.
long et=Time.now() + 5000;
while (Time.now() < et) {
try {
NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
assertEquals(System.getProperty("user.name"),stat.getOwner());
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
catch ( Throwable t) {
// Record the failure; the main thread rethrows after joining.
thrown.set(t);
}
}
}
}
;
statters.add(statter);
statter.start();
}
for ( Thread t : statters) {
t.join();
}
fos.close();
if (thrown.get() != null) {
throw new RuntimeException(thrown.get());
}
}
APIUtilityVerifierAssumptionSetterEqualityVerifierHybridVerifier
/**
 * Writes a known byte pattern to a file, maps and mlocks it, then verifies
 * the mapped contents via the same checksum used while writing, before
 * munmapping. Skipped when native IO is unavailable.
 */
@Test(timeout=10000) public void testMlock() throws Exception {
assumeTrue(NativeIO.isAvailable());
final File TEST_FILE=new File(new File(System.getProperty("test.build.data","build/test/data")),"testMlockFile");
final int BUF_LEN=12289;
byte buf[]=new byte[BUF_LEN];
int bufSum=0;
// Fill with a repeating 0..59 pattern and remember the byte sum.
for (int pos=0; pos < buf.length; pos++) {
byte b=(byte)(pos % 60);
buf[pos]=b;
bufSum+=b;
}
FileOutputStream fos=new FileOutputStream(TEST_FILE);
try {
fos.write(buf);
fos.getChannel().force(true);
}
finally {
fos.close();
}
FileInputStream fis=null;
FileChannel channel=null;
try {
fis=new FileInputStream(TEST_FILE);
channel=fis.getChannel();
long fileSize=channel.size();
MappedByteBuffer mapbuf=channel.map(MapMode.READ_ONLY,0,fileSize);
// Pin the mapping, then verify every byte through it.
NativeIO.POSIX.mlock(mapbuf,fileSize);
int sum=0;
for (int pos=0; pos < fileSize; pos++) {
sum+=mapbuf.get(pos);
}
assertEquals("Expected sums to be equal",bufSum,sum);
NativeIO.POSIX.munmap(mapbuf);
}
finally {
if (channel != null) {
channel.close();
}
if (fis != null) {
fis.close();
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * fstat on a freshly opened file must report the current user as owner (or
 * the Administrators group on Windows, when the user is a member), a
 * non-empty group, and a regular-file mode.
 */
@Test(timeout=30000) public void testFstat() throws Exception {
FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
fos.close();
LOG.info("Stat: " + String.valueOf(stat));
String expectedOwner=System.getProperty("user.name");
if (Path.WINDOWS) {
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(expectedOwner);
final String adminsGroupString="Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner=adminsGroupString;
}
}
assertEquals(expectedOwner,stat.getOwner());
assertNotNull(stat.getGroup());
assertFalse(stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
UtilityVerifierEqualityVerifierHybridVerifier
/** Opening a nonexistent path without O_CREAT must fail with ENOENT (POSIX only). */
@Test(timeout=30000) public void testOpenMissingWithoutCreate() throws Exception {
if (Path.WINDOWS) {
return;
}
LOG.info("Open a missing file without O_CREAT and it should fail");
try {
NativeIO.POSIX.open(new File(TEST_DIR,"doesntexist").getAbsolutePath(),NativeIO.POSIX.O_WRONLY,0700);
fail("Able to open a new file without O_CREAT");
}
catch ( NativeIOException nioe) {
LOG.info("Got expected exception",nioe);
assertEquals(Errno.ENOENT,nioe.getErrno());
}
}
UtilityVerifierEqualityVerifierHybridVerifier
/** TRY_ONCE_THEN_FAIL must never fail over: the second call surfaces impl1's error. */
@Test public void testNeverFailOver() throws UnreliableException, IOException, StandbyException {
UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),RetryPolicies.TRY_ONCE_THEN_FAIL);
// First invocation succeeds on impl1.
unreliable.succeedsOnceThenFailsReturningString();
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded twice");
}
catch ( UnreliableException e) {
// Still talking to impl1 — no failover happened.
assertEquals("impl1",e.getMessage());
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that concurrent failed method invocations only result in a single
 * failover.
 */
@Test public void testConcurrentMethodFailures() throws InterruptedException {
FlipFlopProxyProvider proxyProvider=new FlipFlopProxyProvider(UnreliableInterface.class,new SynchronizedUnreliableImplementation("impl1",TypeOfExceptionToFailWith.STANDBY_EXCEPTION,2),new UnreliableImplementation("impl2",TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,proxyProvider,RetryPolicies.failoverOnNetworkException(10));
// Race two callers through the same retrying proxy.
ConcurrentMethodThread first=new ConcurrentMethodThread(unreliable);
ConcurrentMethodThread second=new ConcurrentMethodThread(unreliable);
first.start();
second.start();
first.join();
second.join();
// Both callers ended up on impl2, yet only one failover was recorded.
assertEquals("impl2",first.result);
assertEquals("impl2",second.result);
assertEquals(1,proxyProvider.getFailoversOccurred());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Ensure that when all configured services are throwing StandbyException
 * that we fail over back and forth between them until one is no longer
 * throwing StandbyException.
 */
@Test public void testFailoverBetweenMultipleStandbys() throws UnreliableException, StandbyException, IOException {
final long millisToSleep=10000;
final UnreliableImplementation impl1=new UnreliableImplementation("impl1",TypeOfExceptionToFailWith.STANDBY_EXCEPTION);
FlipFlopProxyProvider proxyProvider=new FlipFlopProxyProvider(UnreliableInterface.class,impl1,new UnreliableImplementation("impl2",TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,proxyProvider,RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,10,1000,10000));
// After a delay, rename impl1 so the identifier finally matches and the
// ping-ponging retry loop can terminate successfully.
Thread renamer=new Thread(){
@Override public void run(){
ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep);
impl1.setIdentifier("renamed-impl1");
}
}
;
renamer.start();
String result=unreliable.failsIfIdentifierDoesntMatch("renamed-impl1");
assertEquals("renamed-impl1",result);
}
Class: org.apache.hadoop.io.retry.TestRetryProxy
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}
 */
@Test public void testRpcInvocation() throws Exception {
final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,unreliableImpl,RETRY_FOREVER);
assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
// A translator that counts how often its underlying proxy is queried.
ProtocolTranslator xlator=new ProtocolTranslator(){
int count=0;
@Override public Object getUnderlyingProxyObject(){
count++;
return unreliable;
}
@Override public String toString(){
return "" + count;
}
}
;
assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
// Fix: expected value goes first in assertEquals. isRpcInvocation must
// have unwrapped the translator exactly once.
assertEquals("1",xlator.toString());
assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
}
EqualityVerifier
/**
 * Test the retry count while used in a retry proxy.
 */
@Test(timeout=60000) public void testRetryProxy() throws IOException {
final Client client=new Client(LongWritable.class,conf);
final TestServer server=new TestServer(1,false);
// Server-side listener: the retry count reported to the server must
// advance by one on every retried call.
server.callListener=new Runnable(){
private int retryCount=0;
@Override public void run(){
Assert.assertEquals(retryCount++,Server.getCallRetryCount());
}
}
;
final int totalRetry=10000;
DummyProtocol proxy=(DummyProtocol)Proxy.newProxyInstance(DummyProtocol.class.getClassLoader(),new Class[]{DummyProtocol.class},new TestInvocationHandler(client,server,totalRetry));
DummyProtocol retryProxy=(DummyProtocol)RetryProxy.create(DummyProtocol.class,proxy,RetryPolicies.RETRY_FOREVER);
try {
server.start();
retryProxy.dummyRun();
// Fix: expected value first — the initial call plus totalRetry retries.
Assert.assertEquals(totalRetry + 1,TestInvocationHandler.retry);
}
finally {
Client.setCallIdAndRetryCount(0,0);
client.stop();
server.stop();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000) public void testCallRetryCount() throws IOException {
final int retryCount=255;
final Client client=new Client(LongWritable.class,conf);
// Fix: reuse the named constant rather than repeating the literal 255,
// so the value asserted below can never drift from the value set here.
Client.setCallIdAndRetryCount(Client.nextCallId(),retryCount);
final TestServer server=new TestServer(1,false);
server.callListener=new Runnable(){
@Override public void run(){
// The server must observe the retry count the client sent.
Assert.assertEquals(retryCount,Server.getCallRetryCount());
}
}
;
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
final SerialCaller caller=new SerialCaller(client,addr,10);
caller.run();
assertFalse(caller.failed);
}
finally {
client.stop();
server.stop();
}
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises server-side idle-connection cleanup: idle client connections are
 * culled at most killMax per scan interval, while a connection with an
 * in-flight call is spared until that call completes.
 */
@Test(timeout=30000) public void testConnectionIdleTimeouts() throws Exception {
((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
final int maxIdle=1000;
final int cleanupInterval=maxIdle * 3 / 4;
final int killMax=3;
final int clients=1 + killMax * 2;
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,maxIdle);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,0);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,killMax);
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,cleanupInterval);
final CyclicBarrier firstCallBarrier=new CyclicBarrier(2);
final CyclicBarrier callBarrier=new CyclicBarrier(clients);
final CountDownLatch allCallLatch=new CountDownLatch(clients);
final AtomicBoolean error=new AtomicBoolean();
final TestServer server=new TestServer(clients,false);
Thread[] threads=new Thread[clients];
try {
// The first call blocks on its own barrier (stays in-flight); all the
// other calls rendezvous on callBarrier so they finish together.
server.callListener=new Runnable(){
AtomicBoolean first=new AtomicBoolean(true);
@Override public void run(){
try {
allCallLatch.countDown();
if (first.compareAndSet(true,false)) {
firstCallBarrier.await();
}
else {
callBarrier.await();
}
}
catch ( Throwable t) {
LOG.error(t);
error.set(true);
}
}
}
;
server.start();
final CountDownLatch callReturned=new CountDownLatch(clients - 1);
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
final Configuration clientConf=new Configuration();
// Clients keep their side open long enough that only the server culls.
clientConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,10000);
for (int i=0; i < clients; i++) {
threads[i]=new Thread(new Runnable(){
@Override public void run(){
Client client=new Client(LongWritable.class,clientConf);
try {
client.call(new LongWritable(Thread.currentThread().getId()),addr,null,null,0,clientConf);
callReturned.countDown();
Thread.sleep(10000);
}
catch ( IOException e) {
LOG.error(e);
}
catch ( InterruptedException e) {
}
}
}
);
threads[i].start();
}
allCallLatch.await();
assertFalse(error.get());
assertEquals(clients,server.getNumOpenConnections());
// Release all but the first call and wait for them to return; their
// connections are now idle while the first stays busy.
callBarrier.await();
callReturned.await();
assertEquals(clients,server.getNumOpenConnections());
Thread.sleep(maxIdle * 2 - cleanupInterval);
// Each scan culls at most killMax idle connections per pass.
for (int i=clients; i > 1; i-=killMax) {
Thread.sleep(cleanupInterval);
assertFalse(error.get());
assertEquals(i,server.getNumOpenConnections());
}
Thread.sleep(cleanupInterval);
assertFalse(error.get());
assertEquals(1,server.getNumOpenConnections());
// Unblock the in-flight call; once idle, its connection goes too.
firstCallBarrier.await();
Thread.sleep(maxIdle * 2);
assertFalse(error.get());
assertEquals(0,server.getNumOpenConnections());
}
finally {
for ( Thread t : threads) {
if (t != null) {
t.interrupt();
t.join();
}
}
// Fix: stop the server once, after all client threads are joined — the
// original invoked server.stop() inside the loop, once per thread.
server.stop();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierPublicFieldVerifierHybridVerifier
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000) public void testCallIdAndRetry() throws IOException {
final CallInfo info=new CallInfo();
// Client subclass that records the id/retry of every call it creates and
// cross-checks them against the response header returned by the server.
final Client client=new Client(LongWritable.class,conf){
@Override Call createCall( RpcKind rpcKind, Writable rpcRequest){
final Call call=super.createCall(rpcKind,rpcRequest);
// Capture what the client generated, for the server side to compare.
info.id=call.id;
info.retry=call.retry;
return call;
}
@Override void checkResponse( RpcResponseHeaderProto header) throws IOException {
super.checkResponse(header);
// (2) the server echoed back the same call id and retry count.
Assert.assertEquals(info.id,header.getCallId());
Assert.assertEquals(info.retry,header.getRetryCount());
}
}
;
final TestServer server=new TestServer(1,false);
// (1) the server-side handler observes the id/retry the client generated.
server.callListener=new Runnable(){
@Override public void run(){
Assert.assertEquals(info.id,Server.getCallId());
Assert.assertEquals(info.retry,Server.getCallRetryCount());
}
}
;
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
// Drive 10 serial calls; any assertion failure marks caller.failed.
final SerialCaller caller=new SerialCaller(client,addr,10);
caller.run();
assertFalse(caller.failed);
}
finally {
client.stop();
server.stop();
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test if the rpc server gets the default retry count (0) from client.
 */
@Test(timeout=60000) public void testInitialCallRetryCount() throws IOException {
final Client client=new Client(LongWritable.class,conf);
final TestServer server=new TestServer(1,false);
// A client that never set a retry count must be observed as retry 0.
server.callListener=new Runnable(){
@Override public void run(){
Assert.assertEquals(0,Server.getCallRetryCount());
}
}
;
try {
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
final SerialCaller caller=new SerialCaller(client,addr,10);
caller.run();
assertFalse(caller.failed);
}
finally {
client.stop();
server.stop();
}
}
APIUtilityVerifierIterativeVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Tests that client generates a unique sequential call ID for each RPC call,
 * even if multiple threads are using the same client.
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testUniqueSequentialCallIds() throws IOException, InterruptedException {
int serverThreads=10, callerCount=100, perCallerCallCount=100;
TestServer server=new TestServer(serverThreads,false);
// Fix: parameterize the raw List so get(i) yields Integer (the raw form
// returns Object, breaking the .intValue() calls below).
final List<Integer> callIds=Collections.synchronizedList(new ArrayList<Integer>());
server.callListener=new Runnable(){
@Override public void run(){
callIds.add(Server.getCallId());
}
}
;
Client client=new Client(LongWritable.class,conf);
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
SerialCaller[] callers=new SerialCaller[callerCount];
for (int i=0; i < callerCount; ++i) {
callers[i]=new SerialCaller(client,addr,perCallerCallCount);
callers[i].start();
}
for (int i=0; i < callerCount; ++i) {
callers[i].join();
assertFalse(callers[i].failed);
}
}
finally {
client.stop();
server.stop();
}
// Every call must appear exactly once, with strictly consecutive ids.
int expectedCallCount=callerCount * perCallerCallCount;
assertEquals(expectedCallCount,callIds.size());
Collections.sort(callIds);
final int startID=callIds.get(0).intValue();
for (int i=0; i < expectedCallCount; ++i) {
assertEquals(startID + i,callIds.get(i).intValue());
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/** The server builder must honor configured queue size and reader count, and explicit overrides. */
@Test public void testConfRpc() throws IOException {
// Defaults: the built server reflects the Configuration values.
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(1).setVerbose(false).build();
int confQ=conf.getInt(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
int confReaders=conf.getInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
assertEquals(confQ,server.getMaxQueueSize());
assertEquals(confReaders,server.getNumReaders());
server.stop();
// Explicit builder settings take precedence over the Configuration.
server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200).setVerbose(false).build();
assertEquals(3,server.getNumReaders());
assertEquals(200,server.getMaxQueueSize());
server.stop();
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test that server.stop() properly stops all threads
 */
@Test public void testStopsAllThreads() throws IOException, InterruptedException {
assertEquals("Expect no Reader threads running before test",0,countThreads("Server$Listener$Reader"));
final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
try {
// Poll (up to ~5s) until at least one Reader thread appears.
long totalSleepTime=0;
int threadsRunning;
do {
totalSleepTime+=10;
Thread.sleep(10);
threadsRunning=countThreads("Server$Listener$Reader");
}
while (threadsRunning == 0 && totalSleepTime < 5000);
threadsRunning=countThreads("Server$Listener$Reader");
assertTrue(threadsRunning > 0);
}
finally {
server.stop();
}
assertEquals("Expect no Reader threads left running after test",0,countThreads("Server$Listener$Reader"));
}
Class: org.apache.hadoop.ipc.TestRPCCallBenchmark
EqualityVerifier
/** End-to-end RPC benchmark over the protobuf engine must exit cleanly (rc 0). */
@Test(timeout=20000) public void testBenchmarkWithProto() throws Exception {
String[] args={"--clientThreads","30","--serverThreads","30","--time","5","--serverReaderThreads","4","--messageSize","1024","--engine","protobuf"};
int rc=ToolRunner.run(new RPCCallBenchmark(),args);
assertEquals(0,rc);
}
EqualityVerifier
/** End-to-end RPC benchmark over the writable engine must exit cleanly (rc 0). */
@Test(timeout=20000) public void testBenchmarkWithWritable() throws Exception {
String[] args={"--clientThreads","30","--serverThreads","30","--time","5","--serverReaderThreads","4","--messageSize","1024","--engine","writable"};
int rc=ToolRunner.run(new RPCCallBenchmark(),args);
assertEquals(0,rc);
}
Class: org.apache.hadoop.ipc.TestRPCCompatibility
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
 * the server registry to extract protocol signatures and versions.
 */
@Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
TestImpl1 impl=new TestImpl1();
server=new RPC.Builder(conf).setProtocol(TestProtocol1.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl);
server.start();
ProtocolMetaInfoServerSideTranslatorPB xlator=new ProtocolMetaInfoServerSideTranslatorPB(server);
// TestProtocol1 was registered for the WRITABLE kind, so a lookup under
// PROTOCOL_BUFFER yields nothing.
GetProtocolSignatureResponseProto resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_PROTOCOL_BUFFER));
Assert.assertEquals(0,resp.getProtocolSignatureCount());
resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_WRITABLE));
Assert.assertEquals(1,resp.getProtocolSignatureCount());
ProtocolSignatureProto sig=resp.getProtocolSignatureList().get(0);
Assert.assertEquals(TestProtocol1.versionID,sig.getVersion());
// The signature must include the fingerprint of echo(String).
int expected=ProtocolSignature.getFingerprint(TestProtocol1.class.getMethod("echo",String.class));
Assert.assertTrue(sig.getMethodsList().contains(expected));
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Method fingerprints must distinguish overloads, names, and arities; match
 * for identical signatures across protocols; and be order-independent when
 * computed over a set of methods.
 */
@Test public void testHashCode() throws Exception {
Method strMethod=TestProtocol3.class.getMethod("echo",String.class);
Method intMethod=TestProtocol3.class.getMethod("echo",int.class);
int stringEchoHash=ProtocolSignature.getFingerprint(strMethod);
int intEchoHash=ProtocolSignature.getFingerprint(intMethod);
// Overloads with different parameter types hash differently.
assertFalse(stringEchoHash == intEchoHash);
// echo(int) is identical on TestProtocol2, so the fingerprints match...
assertEquals(intEchoHash,ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo",int.class)));
// ...while echo(String) differs between the two protocols.
assertFalse(stringEchoHash == ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo",String.class)));
// A different method name changes the fingerprint.
assertFalse(intEchoHash == ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo_alias",int.class)));
// A different arity changes the fingerprint.
assertFalse(intEchoHash == ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo",int.class,int.class)));
// A method-set fingerprint ignores ordering.
int hash1=ProtocolSignature.getFingerprint(new Method[]{intMethod,strMethod});
int hash2=ProtocolSignature.getFingerprint(new Method[]{strMethod,intMethod});
assertEquals(hash1,hash2);
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Connections must be shared while per-connection settings (max idle time)
 * are unchanged, and a new connection must be created once they change.
 */
@Test public void testPerConnectionConf() throws Exception {
TestTokenSecretManager sm=new TestTokenSecretManager();
final Server server=new RPC.Builder(conf).setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current=UserGroupInformation.getCurrentUser();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()));
Token token=new Token(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
current.addToken(token);
Configuration newConf=new Configuration(conf);
newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,"");
Client client=null;
TestSaslProtocol proxy1=null;
TestSaslProtocol proxy2=null;
TestSaslProtocol proxy3=null;
int timeouts[]={111222,3333333};
try {
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[0]);
proxy1=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy1.getAuthMethod();
client=WritableRpcEngine.getClient(newConf);
Set conns=client.getConnectionIds();
assertEquals("number of connections in cache is wrong",1,conns.size());
// Same conf values: the second proxy reuses the cached connection.
proxy2=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy2.getAuthMethod();
assertEquals("number of connections in cache is wrong",1,conns.size());
// Changed idle time: a distinct connection must be created.
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[1]);
proxy3=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf);
proxy3.getAuthMethod();
assertEquals("number of connections in cache is wrong",2,conns.size());
ConnectionId[] connsArray={RPC.getConnectionIdForProxy(proxy1),RPC.getConnectionIdForProxy(proxy2),RPC.getConnectionIdForProxy(proxy3)};
assertEquals(connsArray[0],connsArray[1]);
assertEquals(timeouts[0],connsArray[0].getMaxIdleTime());
assertFalse(connsArray[0].equals(connsArray[2]));
// Fix: the original assertNotSame on autoboxed ints always passed
// (distinct boxes); proxy3's connection was built with timeouts[1], so
// the meaningful check is equality.
assertEquals(timeouts[1],connsArray[2].getMaxIdleTime());
}
finally {
server.stop();
if (client != null) {
client.getConnectionIds().clear();
}
if (proxy1 != null) RPC.stopProxy(proxy1);
if (proxy2 != null) RPC.stopProxy(proxy2);
if (proxy3 != null) RPC.stopProxy(proxy3);
}
}
Class: org.apache.hadoop.ipc.TestSocketFactory
APIUtilityVerifierInternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Socket factories must implement equals/hashCode well enough to serve as
 * map keys: distinct factory types are distinct keys and each key retrieves
 * exactly the value stored under it.
 */
@Test public void testSocketFactoryAsKeyInMap(){
// Fix: parameterize the raw Map, and compare removed values with
// assertEquals — the original assertSame on autoboxed ints only passed by
// accident of the JVM's small-Integer cache.
Map<SocketFactory,Integer> dummyCache=new HashMap<SocketFactory,Integer>();
int toBeCached1=1;
int toBeCached2=2;
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,"org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
final SocketFactory dummySocketFactory=NetUtils.getDefaultSocketFactory(conf);
dummyCache.put(dummySocketFactory,toBeCached1);
conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,"org.apache.hadoop.net.StandardSocketFactory");
final SocketFactory defaultSocketFactory=NetUtils.getDefaultSocketFactory(conf);
dummyCache.put(defaultSocketFactory,toBeCached2);
Assert.assertEquals("The cache contains two elements",2,dummyCache.size());
Assert.assertFalse("Equals of both socket factory shouldn't be same",defaultSocketFactory.equals(dummySocketFactory));
Assert.assertEquals(Integer.valueOf(toBeCached2),dummyCache.remove(defaultSocketFactory));
dummyCache.put(defaultSocketFactory,toBeCached2);
Assert.assertEquals(Integer.valueOf(toBeCached1),dummyCache.remove(dummySocketFactory));
}
InternalCallVerifierEqualityVerifier
/** Sanity check that {@link Clock#getTime()} tracks the system clock. */
@Test(timeout=1000) public void testClock(){
  Clock clock=new Clock();
  long expected=System.currentTimeMillis();
  long actual=clock.getTime();
  // Allow up to 30ms of skew between the two reads.
  assertEquals(expected,actual,30);
}
Class: org.apache.hadoop.mapred.TestClusterStatus
EqualityVerifier
/** The cluster's JobTracker must report the RUNNING state. */
@SuppressWarnings("deprecation") @Test(timeout=1000) public void testJobTrackerState(){
  JobTracker.State actual=clusterStatus.getJobTrackerState();
  Assert.assertEquals(JobTracker.State.RUNNING,actual);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/** A freshly started cluster must report zero graylisted trackers. */
@SuppressWarnings("deprecation") @Test(timeout=1000) public void testGraylistedTrackers(){
  // Both the name list and the count should be empty/zero on a fresh cluster.
  Assert.assertTrue(clusterStatus.getGraylistedTrackerNames().isEmpty());
  Assert.assertEquals(0,clusterStatus.getGraylistedTrackers());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Verifies Counter.setValue() and Counter.increment() against a locally
 * tracked expected value over many randomized rounds.
 */
@SuppressWarnings("deprecation") @Test public void testCounterValue(){
  Counters counters=new Counters();
  final int NUMBER_TESTS=100;
  final int NUMBER_INC=10;
  final Random rand=new Random();
  for (int testNum=0; testNum < NUMBER_TESTS; testNum++) {
    final long initValue=rand.nextInt();
    long expected=initValue;
    Counter counter=counters.findCounter("foo","bar");
    // setValue must overwrite whatever the counter held before.
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",expected,counter.getValue());
    for (int incNum=0; incNum < NUMBER_INC; incNum++) {
      final int delta=rand.nextInt();
      counter.increment(delta);
      expected+=delta;
      assertEquals("Counter value is not incremented correctly",expected,counter.getValue());
    }
    expected=rand.nextInt();
    counter.setValue(expected);
    assertEquals("Counter value is not set correctly",expected,counter.getValue());
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test using the gzip codec with two input files.
 *
 * Writes two gzip-compressed files of 5-byte fixed-length records, asks
 * FixedLengthInputFormat for splits, and checks record counts plus a few
 * sampled records from each split.
 */
@Test(timeout=5000) public void testGzipWithTwoInputs() throws IOException {
CompressionCodec gzip=new GzipCodec();
localFs.delete(workDir,true);
FixedLengthInputFormat format=new FixedLengthInputFormat();
JobConf job=new JobConf(defaultConf);
// Every record is exactly 5 bytes (4 chars + trailing space).
format.setRecordLength(job,5);
FileInputFormat.setInputPaths(job,workDir);
ReflectionUtils.setConf(gzip,job);
format.configure(job);
writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"one two threefour five six seveneightnine ten ");
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"ten nine eightsevensix five four threetwo one ");
InputSplit[] splits=format.getSplits(job,100);
// Expects exactly one split per compressed file.
assertEquals("compressed splits == 2",2,splits.length);
FileSplit tmp=(FileSplit)splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
// Normalize split order so the assertions below see part1 first.
splits[0]=splits[1];
splits[1]=tmp;
}
List results=readSplit(format,splits[0],job);
assertEquals("splits[0] length",10,results.size());
// NOTE(review): sibling gzip tests call toString() on the element before
// comparing; presumably readSplit returns Strings here — verify.
assertEquals("splits[0][5]","six ",results.get(5));
results=readSplit(format,splits[1],job);
assertEquals("splits[1] length",10,results.size());
assertEquals("splits[1][0]","ten ",results.get(0));
assertEquals("splits[1][1]","nine ",results.get(1));
}
InternalCallVerifierEqualityVerifier
/**
 * Exercises LocalContainerLauncher.renameMapOutputForReduce() when the map
 * output file and its index file land in two different local directories.
 */
@Test public void testRenameMapOutputForReduce() throws Exception {
final JobConf conf=new JobConf();
final MROutputFiles mrOutputFiles=new MROutputFiles();
mrOutputFiles.setConf(conf);
// Point LOCAL_DIR at the first directory so the output file is
// allocated there...
conf.set(MRConfig.LOCAL_DIR,localDirs[0].toString());
final Path mapOut=mrOutputFiles.getOutputFileForWrite(1);
// ...then switch to the second directory so the index file lands elsewhere.
conf.set(MRConfig.LOCAL_DIR,localDirs[1].toString());
final Path mapOutIdx=mrOutputFiles.getOutputIndexFileForWrite(1);
Assert.assertNotEquals("Paths must be different!",mapOut.getParent(),mapOutIdx.getParent());
// Restore the full list of local dirs before invoking the rename.
conf.setStrings(MRConfig.LOCAL_DIR,localDirs);
final FileContext lfc=FileContext.getLocalFSFileContext(conf);
// Create both (empty) files so the rename has something to move.
lfc.create(mapOut,EnumSet.of(CREATE)).close();
lfc.create(mapOutIdx,EnumSet.of(CREATE)).close();
final JobId jobId=MRBuilderUtils.newJobId(12345L,1,2);
final TaskId tid=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP);
final TaskAttemptId taid=MRBuilderUtils.newTaskAttemptId(tid,0);
// Must complete without throwing even though file and index are split
// across different local dirs.
LocalContainerLauncher.renameMapOutputForReduce(conf,taid,mrOutputFiles);
}
InternalCallVerifierEqualityVerifier
/**
 * Verifies that a mini-cluster restart preserves every externally visible
 * RM / JobHistory service address. Replaces seven copy-pasted
 * read/restart/assert stanzas with one data-driven loop; the assertion
 * messages are unchanged.
 */
@Test public void testRestart() throws Exception {
  // Every address a client may have cached must survive the restart.
  final String[] keys={YarnConfiguration.RM_ADDRESS,YarnConfiguration.RM_ADMIN_ADDRESS,YarnConfiguration.RM_SCHEDULER_ADDRESS,YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,YarnConfiguration.RM_WEBAPP_ADDRESS,JHAdminConfig.MR_HISTORY_ADDRESS,JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS};
  final String[] before=new String[keys.length];
  for (int i=0; i < keys.length; i++) {
    before[i]=mrCluster.getConfig().get(keys[i]);
  }
  mrCluster.restart();
  for (int i=0; i < keys.length; i++) {
    String after=mrCluster.getConfig().get(keys[i]);
    assertEquals("Address before restart: " + before[i] + " is different from new address: "+ after,before[i],after);
  }
}
InternalCallVerifierEqualityVerifier
/**
 * Checks the 8-segment interpolation of both PeriodicStatsAccumulator
 * flavors: CumulativePeriodicStats and StatePeriodicStats.
 */
@Test public void testPeriodStatsets(){
  PeriodicStatsAccumulator cumulative=new CumulativePeriodicStats(8);
  PeriodicStatsAccumulator status=new StatePeriodicStats(8);
  // Feed irregularly spaced (progress, value) points; the accumulator must
  // interpolate them onto 8 equal-width progress segments.
  cumulative.extend(0.0D,0);
  cumulative.extend(0.4375D,700);
  cumulative.extend(0.5625D,1100);
  cumulative.extend(0.625D,1300);
  cumulative.extend(1.0D,7901);
  int[] results=cumulative.getValues();
  for (int i=0; i < 8; ++i) {
    // Fixed: print the actual segment index (the original emitted the
    // literal text "segment i" for every row). Also dropped the unused
    // local "total" and a stray empty statement.
    System.err.println("segment " + i + " = "+ results[i]);
  }
  assertEquals("Bad interpolation in cumulative segment 0",200,results[0]);
  assertEquals("Bad interpolation in cumulative segment 1",200,results[1]);
  assertEquals("Bad interpolation in cumulative segment 2",200,results[2]);
  assertEquals("Bad interpolation in cumulative segment 3",300,results[3]);
  assertEquals("Bad interpolation in cumulative segment 4",400,results[4]);
  assertEquals("Bad interpolation in cumulative segment 5",2200,results[5]);
  assertEquals("Bad interpolation in cumulative segment 6",2200,results[6]);
  assertEquals("Bad interpolation in cumulative segment 7",2201,results[7]);
  status.extend(0.0D,0);
  status.extend(1.0D / 16.0D,300);
  status.extend(3.0D / 16.0D,700);
  status.extend(7.0D / 16.0D,2300);
  status.extend(1.0D,1400);
  results=status.getValues();
  assertEquals("Bad interpolation in status segment 0",275,results[0]);
  assertEquals("Bad interpolation in status segment 1",750,results[1]);
  assertEquals("Bad interpolation in status segment 2",1500,results[2]);
  assertEquals("Bad interpolation in status segment 3",2175,results[3]);
  assertEquals("Bad interpolation in status segment 4",2100,results[4]);
  assertEquals("Bad interpolation in status segment 5",1900,results[5]);
  assertEquals("Bad interpolation in status segment 6",1700,results[6]);
  assertEquals("Bad interpolation in status segment 7",1500,results[7]);
}
Class: org.apache.hadoop.mapred.TestTaskStatus
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test the {@link TaskStatus} against large sized task-diagnostic-info and
 * state-string. Does the following
 *  - create Map/Reduce TaskStatus such that the task-diagnostic-info and
 *    state-string are small strings and check their contents
 *  - append them with small string and check their contents
 *  - append them with large string and check their size
 *  - update the status using statusUpdate() calls and check the size/contents
 *  - create Map/Reduce TaskStatus with large string and check their size
 */
@Test public void testTaskDiagnosticsAndStateString(){
String test="hi";
// Cap enforced by the anonymous subclasses below: both strings are
// clipped to 16 characters.
final int maxSize=16;
TaskStatus status=new TaskStatus(null,0,0,null,test,test,null,null,null){
@Override protected int getMaxStringSize(){
return maxSize;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
@Override public boolean getIsMap(){
return false;
}
}
;
assertEquals("Small diagnostic info test failed",status.getDiagnosticInfo(),test);
assertEquals("Small state string test failed",status.getStateString(),test);
String newDInfo=test.concat(test);
// setDiagnosticInfo appends (the assertion below expects "hi"+"hi"),
// while setStateString replaces.
status.setDiagnosticInfo(test);
status.setStateString(newDInfo);
assertEquals("Small diagnostic info append failed",newDInfo,status.getDiagnosticInfo());
assertEquals("Small state-string append failed",newDInfo,status.getStateString());
// statusUpdate(TaskStatus) appends the other status' diagnostic info and
// adopts its state-string.
TaskStatus newStatus=(TaskStatus)status.clone();
String newSInfo="hi1";
newStatus.setStateString(newSInfo);
status.statusUpdate(newStatus);
newDInfo=newDInfo.concat(newStatus.getDiagnosticInfo());
assertEquals("Status-update on diagnostic-info failed",newDInfo,status.getDiagnosticInfo());
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
newSInfo="hi2";
status.statusUpdate(0,newSInfo,null);
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
newSInfo="hi3";
status.statusUpdate(null,0,newSInfo,null,0);
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
// 20-char payloads must be clipped to maxSize (16) on every update path.
String large="hihihihihihihihihihi";
status.setDiagnosticInfo(large);
status.setStateString(large);
assertEquals("Large diagnostic info append test failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Large state-string append test failed",maxSize,status.getStateString().length());
newStatus.setDiagnosticInfo(large + "0");
newStatus.setStateString(large + "1");
status.statusUpdate(newStatus);
assertEquals("Status-update on diagnostic info failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status.statusUpdate(0,large + "2",null);
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status.statusUpdate(null,0,large + "3",null,0);
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
// Constructing directly with oversized strings must clip as well.
status=new TaskStatus(null,0,0,null,large,large,null,null,null){
@Override protected int getMaxStringSize(){
return maxSize;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
@Override public boolean getIsMap(){
return false;
}
}
;
assertEquals("Large diagnostic info test failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Large state-string test failed",maxSize,status.getStateString().length());
}
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Round-trip test for TextInputFormat: writes files of increasing line
 * counts, splits them into a random number of splits, and verifies every
 * line is read exactly once across all splits.
 */
@Test(timeout=500000) public void testFormat() throws Exception {
JobConf job=new JobConf(defaultConf);
Path file=new Path(workDir,"test.txt");
Reporter reporter=Reporter.NULL;
// Log the seed so a failing run can be reproduced deterministically.
int seed=new Random().nextInt();
LOG.info("seed = " + seed);
Random random=new Random(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.debug("creating; entries = " + length);
// One integer per line: 0..length-1.
Writer writer=new OutputStreamWriter(localFs.create(file));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(job);
LongWritable key=new LongWritable();
Text value=new Text();
// Exercise three different random split counts per generated file.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 20) + 1;
LOG.debug("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.debug("splitting: got = " + splits.length);
if (length == 0) {
assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length);
assertEquals("Empty file length == 0",0,splits[0].getLength());
}
// Bit v is set once line "v" has been read; a second sighting means
// two splits overlapped.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],job,reporter);
try {
int count=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ count);
}
finally {
reader.close();
}
}
// Every line must have been consumed by exactly one split.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Test readLine for correct interpretation of maxLineLength
 * (returned string should be clipped at maxLineLength, and the
 * remaining bytes on the same line should be thrown out).
 * Also check that returned value matches the string length.
 * Varies buffer size to stress test.
 * @throws Exception
 */
@Test(timeout=5000) public void testMaxLineLength() throws Exception {
// Lines: "a", "bb", "", "ccc", "dddd", "eeeee" (mixed \n, \r, \r\n).
final String STR="a\nbb\n\nccc\rdddd\r\neeeee";
final int STRLENBYTES=STR.getBytes().length;
Text out=new Text();
// Repeat with every buffer size from 1 byte to the full string so
// clipping is exercised across buffer boundaries.
for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) {
LineReader in=makeStream(STR,bufsz);
// c accumulates readLine's returned byte counts; at the end it must
// equal the total byte length of STR (clipped bytes still consumed).
int c=0;
c+=in.readLine(out,1);
assertEquals("line1 length, bufsz: " + bufsz,1,out.getLength());
c+=in.readLine(out,1);
// "bb" clipped to maxLineLength=1; the remaining byte is discarded.
assertEquals("line2 length, bufsz: " + bufsz,1,out.getLength());
c+=in.readLine(out,1);
assertEquals("line3 length, bufsz: " + bufsz,0,out.getLength());
c+=in.readLine(out,3);
assertEquals("line4 length, bufsz: " + bufsz,3,out.getLength());
c+=in.readLine(out,10);
// maxLineLength larger than the line: "dddd" returned intact.
assertEquals("line5 length, bufsz: " + bufsz,4,out.getLength());
c+=in.readLine(out,8);
assertEquals("line5 length, bufsz: " + bufsz,5,out.getLength());
assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out));
assertEquals("total bytes, bufsz: " + bufsz,c,STRLENBYTES);
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * An empty gzip-compressed input must yield exactly one split containing
 * zero records.
 */
@Test(timeout=5000) public void testGzipEmpty() throws IOException {
  JobConf conf=new JobConf(defaultConf);
  CompressionCodec codec=new GzipCodec();
  ReflectionUtils.setConf(codec,conf);
  localFs.delete(workDir,true);
  // Write a gzip file whose uncompressed payload is empty.
  writeFile(localFs,new Path(workDir,"empty.gz"),codec,"");
  FileInputFormat.setInputPaths(conf,workDir);
  TextInputFormat inputFormat=new TextInputFormat();
  inputFormat.configure(conf);
  InputSplit[] splits=inputFormat.getSplits(conf,100);
  assertEquals("Compressed files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length);
  List records=readSplit(inputFormat,splits[0],conf);
  assertEquals("Compressed empty file length == 0",0,records.size());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Splittable-codec (BZip2) round trip: writes compressed files of integer
 * lines, splits them several ways, and checks every line is read exactly
 * once across all splits.
 */
@Test(timeout=900000) public void testSplitableCodecs() throws IOException {
JobConf conf=new JobConf(defaultConf);
int seed=new Random().nextInt();
CompressionCodec codec=null;
try {
codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf);
}
 catch ( ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Path file=new Path(workDir,"test" + codec.getDefaultExtension());
Reporter reporter=Reporter.NULL;
// Log the seed so a failing run can be reproduced deterministically.
LOG.info("seed = " + seed);
Random random=new Random(seed);
FileSystem localFs=FileSystem.getLocal(conf);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(conf,workDir);
final int MAX_LENGTH=500000;
// Start at half the max so every generated file is large.
for (int length=MAX_LENGTH / 2; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) {
LOG.info("creating; entries = " + length);
// One integer per line: 0..length-1, written through the codec.
Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
 finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(conf);
LongWritable key=new LongWritable();
Text value=new Text();
// Exercise three different random split counts per generated file.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(conf,numSplits);
LOG.info("splitting: got = " + splits.length);
// Bit v is set once line "v" has been read; a second sighting means
// two splits overlapped.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],conf,reporter);
try {
int counter=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
counter++;
}
if (counter > 0) {
LOG.info("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
 else {
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
}
 finally {
reader.close();
}
}
// Every line must be consumed by exactly one split.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Test readLine for various kinds of line termination sequneces.
 * Varies buffer size to stress test. Also check that returned
 * value matches the string length.
 * @throws Exception
 */
@Test(timeout=5000) public void testNewLines() throws Exception {
// Lines (per the assertions below): "a", "bb", "", "ccc", "dddd",
// then three empties from the bare \r, \r\n, \r\n run, then "eeeee".
final String STR="a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee";
final int STRLENBYTES=STR.getBytes().length;
Text out=new Text();
// Repeat with every buffer size from 1 byte to the full string so
// terminator handling is exercised across buffer boundaries.
for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) {
LineReader in=makeStream(STR,bufsz);
// c accumulates readLine's returned byte counts; at the end it must
// equal the total byte length of STR.
int c=0;
c+=in.readLine(out);
assertEquals("line1 length, bufsz:" + bufsz,1,out.getLength());
c+=in.readLine(out);
assertEquals("line2 length, bufsz:" + bufsz,2,out.getLength());
c+=in.readLine(out);
assertEquals("line3 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line4 length, bufsz:" + bufsz,3,out.getLength());
c+=in.readLine(out);
assertEquals("line5 length, bufsz:" + bufsz,4,out.getLength());
c+=in.readLine(out);
assertEquals("line6 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line7 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line8 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line9 length, bufsz:" + bufsz,5,out.getLength());
assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out));
assertEquals("total bytes, bufsz: " + bufsz,c,STRLENBYTES);
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Reads two gzip-compressed text files through TextInputFormat and checks
 * the record counts and sampled lines of each split.
 */
@Test(timeout=5000) public void testGzip() throws IOException {
  JobConf conf=new JobConf(defaultConf);
  CompressionCodec codec=new GzipCodec();
  ReflectionUtils.setConf(codec,conf);
  localFs.delete(workDir,true);
  writeFile(localFs,new Path(workDir,"part1.txt.gz"),codec,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs,new Path(workDir,"part2.txt.gz"),codec,"this is a test\nof gzip\n");
  FileInputFormat.setInputPaths(conf,workDir);
  TextInputFormat inputFormat=new TextInputFormat();
  inputFormat.configure(conf);
  InputSplit[] splits=inputFormat.getSplits(conf,100);
  assertEquals("compressed splits == 2",2,splits.length);
  // Splits may come back in either order; make part1 first.
  FileSplit first=(FileSplit)splits[0];
  if (first.getPath().getName().equals("part2.txt.gz")) {
    splits[0]=splits[1];
    splits[1]=first;
  }
  List records=readSplit(inputFormat,splits[0],conf);
  assertEquals("splits[0] length",6,records.size());
  assertEquals("splits[0][5]"," dog",records.get(5).toString());
  records=readSplit(inputFormat,splits[1],conf);
  assertEquals("splits[1] length",2,records.size());
  assertEquals("splits[1][0]","this is a test",records.get(0).toString());
  assertEquals("splits[1][1]","of gzip",records.get(1).toString());
}
InternalCallVerifierEqualityVerifier
/** readLine must pass multi-byte UTF-8 characters through unmodified. */
@Test(timeout=5000) public void testUTF8() throws Exception {
  Text line=new Text();
  LineReader reader=makeStream("abcd\u20acbdcd\u20ac");
  reader.readLine(line);
  assertEquals("readLine changed utf8 characters","abcd\u20acbdcd\u20ac",line.toString());
  // U+200A (hair space) must not be treated as a line terminator.
  reader=makeStream("abc\u200axyz");
  reader.readLine(line);
  assertEquals("split on fake newline","abc\u200axyz",line.toString());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
* Test Pseudo Local File System methods like getFileStatus(), create(),
* open(), exists() for paths on PseudoLocalFs. (Comment was truncated and
* unterminated; closed here to keep the file well-formed.)
*/
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test if a file on PseudoLocalFs of a specific size can be opened and read.
 * Validate the size of the data read.
 * Test the read methods of {@link PseudoLocalFs.RandomInputStream}.
 * @throws Exception
 */
@Test public void testPseudoLocalFsFileSize() throws Exception {
  long fileSize=10000;
  Path path=PseudoLocalFs.generateFilePath("myPsedoFile",fileSize);
  PseudoLocalFs pfs=new PseudoLocalFs();
  pfs.create(path);
  // Byte-at-a-time read. try-with-resources guarantees the stream is closed
  // even when an assertion fails (the original leaked it on failure).
  long totalSize=0;
  try (InputStream in=pfs.open(path,0)) {
    while (in.read() >= 0) {
      ++totalSize;
    }
  }
  assertEquals("File size mismatch with read().",fileSize,totalSize);
  // Bulk-read path; the original never closed this second stream at all.
  totalSize=0;
  try (InputStream in=pfs.open(path,0)) {
    byte[] b=new byte[1024];
    int bytesRead=in.read(b);
    while (bytesRead >= 0) {
      totalSize+=bytesRead;
      bytesRead=in.read(b);
    }
  }
  assertEquals("File size mismatch with read(byte[]).",fileSize,totalSize);
}
InternalCallVerifierEqualityVerifier
/**
 * Test if {@link RandomTextDataGenerator} can generate random words of
 * desired size.
 */
@Test public void testRandomTextDataGenerator(){
  // 10 words, fixed seed 0L, each word exactly 5 characters long.
  RandomTextDataGenerator rtdg=new RandomTextDataGenerator(10,0L,5);
  List<String> words=rtdg.getRandomWords();
  assertEquals("List size mismatch",10,words.size());
  // De-dup through a Set: all 10 generated words must be distinct.
  Set<String> wordsSet=new HashSet<String>(words);
  assertEquals("List size mismatch due to duplicates",10,wordsSet.size());
  for ( String word : wordsSet) {
    assertEquals("Word size mismatch",5,word.length());
  }
}
InternalCallVerifierEqualityVerifier
/**
 * Chain.setReducer(..., byValue=false) must record
 * chain.reducer.byValue=false in the reducer's configuration.
 */
@Test public void testSetReducerWithReducerByValueAsFalse() throws Exception {
  JobConf jobConf=new JobConf();
  JobConf reducerConf=new JobConf();
  Chain.setReducer(jobConf,MyReducer.class,Object.class,Object.class,Object.class,Object.class,false,reducerConf);
  // Default to true so the assertion can only pass if setReducer
  // explicitly wrote false.
  boolean reduceByValue=reducerConf.getBoolean("chain.reducer.byValue",true);
  // assertFalse is the idiomatic form of the original
  // assertEquals(msg, false, value) on a boolean.
  Assert.assertFalse("It should set chain.reducer.byValue as false " + "in reducerConf when we give value as false",reduceByValue);
}
InternalCallVerifierEqualityVerifier
/**
 * Chain.setReducer(..., byValue=true) must record
 * chain.reducer.byValue=true in the reducer's configuration.
 */
@Test public void testSetReducerWithReducerByValueAsTrue() throws Exception {
  JobConf jobConf=new JobConf();
  JobConf reducerConf=new JobConf();
  Chain.setReducer(jobConf,MyReducer.class,Object.class,Object.class,Object.class,Object.class,true,reducerConf);
  // Default to false so the assertion can only pass if setReducer
  // explicitly wrote true.
  boolean reduceByValue=reducerConf.getBoolean("chain.reducer.byValue",false);
  // assertTrue is the idiomatic form of the original
  // assertEquals(msg, true, value) on a boolean.
  Assert.assertTrue("It should set chain.reducer.byValue as true " + "in reducerConf when we give value as true",reduceByValue);
}
BranchVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * test org.apache.hadoop.mapred.pipes.Submitter
 *
 * Runs Submitter.main twice with System.exit trapped via ExitUtil: once
 * with no args (expects the usage text), once with a full argument vector
 * (expects exit status 0).
 * @throws Exception
 */
@Test public void testSubmitter() throws Exception {
JobConf conf=new JobConf();
File[] psw=cleanTokenPasswordFile();
System.setProperty("test.build.data","target/tmp/build/TEST_SUBMITTER_MAPPER/data");
conf.set("hadoop.log.dir","target/tmp");
// Configure a fully non-Java pipes job.
Submitter.setIsJavaMapper(conf,false);
Submitter.setIsJavaReducer(conf,false);
Submitter.setKeepCommandFile(conf,false);
Submitter.setIsJavaRecordReader(conf,false);
Submitter.setIsJavaRecordWriter(conf,false);
PipesPartitioner partitioner=new PipesPartitioner();
partitioner.configure(conf);
Submitter.setJavaPartitioner(conf,partitioner.getClass());
assertEquals(PipesPartitioner.class,(Submitter.getJavaPartitioner(conf)));
// Capture stdout and trap System.exit so the usage path can be asserted
// instead of killing the JVM.
SecurityManager securityManager=System.getSecurityManager();
PrintStream oldps=System.out;
ByteArrayOutputStream out=new ByteArrayOutputStream();
ExitUtil.disableSystemExit();
try {
System.setOut(new PrintStream(out));
Submitter.main(new String[0]);
fail();
}
catch ( ExitUtil.ExitException e) {
// With no args Submitter must print its usage text before exiting.
// NOTE(review): contains("") is trivially true and asserts nothing.
assertTrue(out.toString().contains(""));
assertTrue(out.toString().contains("bin/hadoop pipes"));
assertTrue(out.toString().contains("[-input ] // Input directory"));
assertTrue(out.toString().contains("[-output ] // Output directory"));
assertTrue(out.toString().contains("[-jar // jar filename"));
assertTrue(out.toString().contains("[-inputformat ] // InputFormat class"));
assertTrue(out.toString().contains("[-map ] // Java Map class"));
assertTrue(out.toString().contains("[-partitioner ] // Java Partitioner"));
assertTrue(out.toString().contains("[-reduce ] // Java Reduce class"));
assertTrue(out.toString().contains("[-writer ] // Java RecordWriter"));
assertTrue(out.toString().contains("[-program ] // executable URI"));
assertTrue(out.toString().contains("[-reduces ] // number of reduces"));
assertTrue(out.toString().contains("[-lazyOutput ] // createOutputLazily"));
assertTrue(out.toString().contains("-conf specify an application configuration file"));
assertTrue(out.toString().contains("-D use value for given property"));
assertTrue(out.toString().contains("-fs specify a namenode"));
assertTrue(out.toString().contains("-jt specify a job tracker"));
assertTrue(out.toString().contains("-files specify comma separated files to be copied to the map reduce cluster"));
assertTrue(out.toString().contains("-libjars specify comma separated jar files to include in the classpath."));
assertTrue(out.toString().contains("-archives specify comma separated archives to be unarchived on the compute machines."));
}
finally {
// Restore stdout and the security manager; mark any token password
// files for cleanup on JVM exit.
System.setOut(oldps);
System.setSecurityManager(securityManager);
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
// Second run: a complete, valid argument vector should exit with status 0
// (still via the trapped System.exit, hence the fail() after main()).
try {
File fCommand=getFileCommand(null);
String[] args=new String[22];
File input=new File(workSpace + File.separator + "input");
if (!input.exists()) {
Assert.assertTrue(input.createNewFile());
}
File outPut=new File(workSpace + File.separator + "output");
FileUtil.fullyDelete(outPut);
args[0]="-input";
args[1]=input.getAbsolutePath();
args[2]="-output";
args[3]=outPut.getAbsolutePath();
args[4]="-inputformat";
args[5]="org.apache.hadoop.mapred.TextInputFormat";
args[6]="-map";
args[7]="org.apache.hadoop.mapred.lib.IdentityMapper";
args[8]="-partitioner";
args[9]="org.apache.hadoop.mapred.pipes.PipesPartitioner";
args[10]="-reduce";
args[11]="org.apache.hadoop.mapred.lib.IdentityReducer";
args[12]="-writer";
args[13]="org.apache.hadoop.mapred.TextOutputFormat";
args[14]="-program";
args[15]=fCommand.getAbsolutePath();
args[16]="-reduces";
args[17]="2";
args[18]="-lazyOutput";
args[19]="lazyOutput";
args[20]="-jobconf";
args[21]="mapreduce.pipes.isjavarecordwriter=false,mapreduce.pipes.isjavarecordreader=false";
Submitter.main(args);
fail();
}
catch ( ExitUtil.ExitException e) {
// NOTE(review): JUnit convention is (expected, actual); the arguments
// are swapped here.
assertEquals(e.status,0);
}
finally {
System.setOut(oldps);
System.setSecurityManager(securityManager);
}
}
InternalCallVerifierEqualityVerifier
/**
 * PipesPartitioner: verifies the default partition is 0 and that
 * setNextPartition() overrides the partition returned by getPartition().
 */
@Test public void testPipesPartitioner(){
  PipesPartitioner partitioner=new PipesPartitioner();
  JobConf conf=new JobConf();
  Submitter.getJavaPartitioner(conf);
  partitioner.configure(new JobConf());
  IntWritable key=new IntWritable(4);
  // Before a next-partition is set, everything maps to partition 0.
  assertEquals(0,partitioner.getPartition(key,new Text("test"),2));
  // setNextPartition overrides the result of the next getPartition call.
  PipesPartitioner.setNextPartition(3);
  assertEquals(3,partitioner.getPartition(key,new Text("test"),2));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
/**
 * Verify counter value works: setValue() and increment() are checked
 * against a locally tracked expected value over many randomized rounds.
 */
@Test public void testCounterValue(){
  final int NUMBER_TESTS=100;
  final int NUMBER_INC=10;
  final Random rand=new Random();
  // Create the Counters registry once instead of once per iteration.
  // This matches the sibling testCounterValue (which reuses one Counters
  // and re-finds the counter each round) and avoids 100 throwaway
  // instances; setValue() below resets the counter state every round.
  Counters counters=new Counters();
  for (int i=0; i < NUMBER_TESTS; i++) {
    long initValue=rand.nextInt();
    long expectedValue=initValue;
    Counter counter=counters.findCounter("test","foo");
    counter.setValue(initValue);
    assertEquals("Counter value is not initialized correctly",expectedValue,counter.getValue());
    for (int j=0; j < NUMBER_INC; j++) {
      int incValue=rand.nextInt();
      counter.increment(incValue);
      expectedValue+=incValue;
      assertEquals("Counter value is not incremented correctly",expectedValue,counter.getValue());
    }
    expectedValue=rand.nextInt();
    counter.setValue(expectedValue);
    assertEquals("Counter value is not set correctly",expectedValue,counter.getValue());
  }
}
EqualityVerifier
/**
 * TypeConverter.fromYarn must accept every value of each YARN enum without
 * throwing; NEW_SAVING specifically must map to State.PREP.
 */
@Test public void testEnums() throws Exception {
  for ( YarnApplicationState appState : YarnApplicationState.values()) {
    TypeConverter.fromYarn(appState,FinalApplicationStatus.FAILED);
  }
  // NEW_SAVING has a specific expected mapping.
  Assert.assertEquals(State.PREP,TypeConverter.fromYarn(YarnApplicationState.NEW_SAVING,FinalApplicationStatus.FAILED));
  for ( TaskType type : TaskType.values()) {
    TypeConverter.fromYarn(type);
  }
  for ( JobState js : JobState.values()) {
    TypeConverter.fromYarn(js);
  }
  for ( QueueState qs : QueueState.values()) {
    TypeConverter.fromYarn(qs);
  }
  for ( TaskState ts : TaskState.values()) {
    TypeConverter.fromYarn(ts);
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that child queues are converted too during conversion of the parent
 * queue
 */
@Test public void testFromYarnQueue(){
  org.apache.hadoop.yarn.api.records.QueueInfo child=Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo=Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children=new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child);
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);
  org.apache.hadoop.mapreduce.QueueInfo returned=TypeConverter.fromYarn(queueInfo,new Configuration());
  // JUnit's assertEquals takes (message, expected, actual); the original
  // had the last two arguments swapped.
  Assert.assertEquals("QueueInfo children weren't properly converted",1,returned.getQueueChildren().size());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Converts a mocked YARN ApplicationReport into an MR JobStatus and checks
 * every copied field. Also verifies that the conversion does not throw NPE
 * when the report carries no ApplicationResourceUsageReport.
 */
@Test public void testFromYarnApplicationReport(){
// Application id 6789 in a cluster started at timestamp 12345.
ApplicationId mockAppId=mock(ApplicationId.class);
when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
when(mockAppId.getId()).thenReturn(6789);
ApplicationReport mockReport=mock(ApplicationReport.class);
when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
when(mockReport.getApplicationId()).thenReturn(mockAppId);
when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
when(mockReport.getUser()).thenReturn("dummy-user");
when(mockReport.getQueue()).thenReturn("dummy-queue");
String jobFile="dummy-path/job.xml";
// First conversion: getApplicationResourceUsageReport() is unstubbed and so
// returns null; only the absence of an NPE matters here, which is why the
// returned status is intentionally discarded.
try {
JobStatus status=TypeConverter.fromYarn(mockReport,jobFile);
}
catch ( NullPointerException npe) {
Assert.fail("Type converstion from YARN fails for jobs without " + "ApplicationUsageReport");
}
// Second conversion: attach a populated usage report (the same 2048-MB
// Resource is reused for needed/reserved/used; 1 reserved, 3 used containers).
ApplicationResourceUsageReport appUsageRpt=Records.newRecord(ApplicationResourceUsageReport.class);
Resource r=Records.newRecord(Resource.class);
r.setMemory(2048);
appUsageRpt.setNeededResources(r);
appUsageRpt.setNumReservedContainers(1);
appUsageRpt.setNumUsedContainers(3);
appUsageRpt.setReservedResources(r);
appUsageRpt.setUsedResources(r);
when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
JobStatus status=TypeConverter.fromYarn(mockReport,jobFile);
// Field-by-field verification of the converted status.
Assert.assertNotNull("fromYarn returned null status",status);
Assert.assertEquals("jobFile set incorrectly","dummy-path/job.xml",status.getJobFile());
Assert.assertEquals("queue set incorrectly","dummy-queue",status.getQueue());
Assert.assertEquals("trackingUrl set incorrectly","dummy-tracking-url",status.getTrackingUrl());
Assert.assertEquals("user set incorrectly","dummy-user",status.getUsername());
// Per the asserts, the converter copies the tracking URL into schedulingInfo.
Assert.assertEquals("schedulingInfo set incorrectly","dummy-tracking-url",status.getSchedulingInfo());
Assert.assertEquals("jobId set incorrectly",6789,status.getJobID().getId());
Assert.assertEquals("state set incorrectly",JobStatus.State.KILLED,status.getState());
Assert.assertEquals("needed mem info set incorrectly",2048,status.getNeededMem());
Assert.assertEquals("num rsvd slots info set incorrectly",1,status.getNumReservedSlots());
Assert.assertEquals("num used slots info set incorrectly",3,status.getNumUsedSlots());
Assert.assertEquals("rsvd mem info set incorrectly",2048,status.getReservedMem());
Assert.assertEquals("used mem info set incorrectly",2048,status.getUsedMem());
}
InternalCallVerifierEqualityVerifier
/**
 * Verifies the SQL that OracleDBRecordReader generates for a row split of
 * (1, 10]: an Oracle ROWNUM-based pagination query wrapped around the
 * projected columns, WHERE condition and ORDER BY clause.
 */
@Test(timeout=2000) public void testOracleDBRecordReader() throws Exception {
  DBInputSplit rowSplit = new DBInputSplit(1, 10);
  Configuration conf = new Configuration();
  Connection conn = DriverForTest.getConnection();
  DBConfiguration dbConf = new DBConfiguration(conf);
  dbConf.setInputOrderBy("Order");
  String[] fieldNames = {"f1", "f2"};
  OracleDBRecordReader reader = new OracleDBRecordReader(rowSplit, NullDBWritable.class, conf, conn, dbConf, "condition", fieldNames, "table");
  // The split bounds (1, 10] surface as the ROWNUM range in the query.
  assertEquals("SELECT * FROM (SELECT a.*,ROWNUM dbif_rno FROM ( SELECT f1, f2 FROM table WHERE condition ORDER BY Order ) a WHERE rownum <= 10 ) WHERE dbif_rno > 1",reader.getSelectQuery());
}
InternalCallVerifierEqualityVerifier
/**
 * test splitters from DataDrivenDBInputFormat. For different data types may
 * be different splitter
 */
@Test(timeout=1000) public void testDataDrivenDBInputFormatSplitter() {
  DataDrivenDBInputFormat inputFormat = new DataDrivenDBInputFormat();
  // Shared expectations for the basic SQL types.
  testCommonSplitterTypes(inputFormat);
  // All temporal SQL types must be handled by the date-based splitter.
  assertEquals(DateSplitter.class, inputFormat.getSplitter(Types.TIMESTAMP).getClass());
  assertEquals(DateSplitter.class, inputFormat.getSplitter(Types.DATE).getClass());
  assertEquals(DateSplitter.class, inputFormat.getSplitter(Types.TIME).getClass());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * End-to-end check of CombineSequenceFileInputFormat: all generated input
 * files must collapse into a single CombineFileSplit, and reading that split
 * must visit every key in 0..length-1 exactly once.
 */
@Test(timeout=10000) public void testFormat() throws IOException, InterruptedException {
Job job=Job.getInstance(conf);
Random random=new Random();
long seed=random.nextLong();
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
// Presumably writes numFiles sequence files covering keys 0..length-1
// (helper not visible here) -- the cardinality assert below relies on that.
createFiles(length,numFiles,random,job);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
InputFormat format=new CombineSequenceFileInputFormat();
// Repeat the split/read cycle three times to shake out statefulness.
for (int i=0; i < 3; i++) {
// NOTE(review): numSplits is computed and logged but never passed to
// getSplits(); the combine format always yields one split here. Confirm
// whether the request was meant to influence splitting.
int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// One bit per key: set on first sight, asserted clear beforehand so a key
// appearing twice (in "multiple partitions") fails fast.
BitSet bits=new BitSet(length);
RecordReader reader=format.createRecordReader(split,context);
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
try {
while (reader.nextKeyValue()) {
IntWritable key=reader.getCurrentKey();
BytesWritable value=reader.getCurrentValue();
assertNotNull("Value should not be null.",value);
final int k=key.get();
LOG.debug("read " + k);
assertFalse("Key in multiple partitions.",bits.get(k));
bits.set(k);
}
}
finally {
reader.close();
}
// Every key must have been seen at least once (combined with the assert
// above: exactly once).
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * End-to-end check of CombineTextInputFormat: all generated text files must
 * collapse into a single CombineFileSplit, and reading that split must visit
 * every value in 0..length-1 exactly once.
 */
@Test(timeout=10000) public void testFormat() throws Exception {
Job job=Job.getInstance(new Configuration(defaultConf));
Random random=new Random();
long seed=random.nextLong();
// Log the seed so a failing run can be reproduced.
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
// Presumably writes numFiles text files covering values 0..length-1
// (helper not visible here); the cardinality assert below relies on that.
createFiles(length,numFiles,random);
CombineTextInputFormat format=new CombineTextInputFormat();
// Repeat the split/read cycle three times to shake out statefulness.
for (int i=0; i < 3; i++) {
// NOTE(review): numSplits is computed and logged but never passed to
// getSplits(); the combine format always yields one split here.
int numSplits=random.nextInt(length / 20) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// One bit per value: set on first sight, asserted clear beforehand so a
// duplicate value fails fast.
BitSet bits=new BitSet(length);
LOG.debug("split= " + split);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader reader=format.createRecordReader(split,context);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
try {
int count=0;
while (reader.nextKeyValue()) {
LongWritable key=reader.getCurrentKey();
assertNotNull("Key should not be null.",key);
Text value=reader.getCurrentValue();
// Each line's text is its integer value.
final int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("split=" + split + " count="+ count);
}
finally {
reader.close();
}
// Every value must have been seen at least once (combined with the assert
// above: exactly once).
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Test using the gzip codec for reading.
 */
@Test(timeout=10000) public void testGzip() throws IOException, InterruptedException {
  Configuration jobConf = new Configuration(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, jobConf);
  localFs.delete(workDir, true);
  // Two gzipped input files; combined, they must land in a single split.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, "the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip, "this is a test\nof gzip\n");
  Job job = Job.getInstance(jobConf);
  FileInputFormat.setInputPaths(job, workDir);
  CombineTextInputFormat inputFormat = new CombineTextInputFormat();
  List splits = inputFormat.getSplits(job);
  assertEquals("compressed splits == 1", 1, splits.size());
  List results = readSplit(inputFormat, splits.get(0), job);
  // 6 lines from part1 + 2 lines from part2.
  assertEquals("splits[0] length", 8, results.size());
  final String[] firstList = {"the quick", "brown", "fox jumped", "over", " the lazy", " dog"};
  final String[] secondList = {"this is a test", "of gzip"};
  // The order of the two files inside the combined split is not guaranteed,
  // so accept either file's lines coming first.
  String firstLine = results.get(0).toString();
  if (firstLine.equals(firstList[0])) {
    testResults(results, firstList, secondList);
  } else if (firstLine.equals(secondList[0])) {
    testResults(results, secondList, firstList);
  } else {
    fail("unexpected first token!");
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test using the gzip codec with two input files.
 */
@Test(timeout=5000) public void testGzipWithTwoInputs() throws Exception {
  CompressionCodec gzip = new GzipCodec();
  localFs.delete(workDir, true);
  Job job = Job.getInstance(defaultConf);
  FixedLengthInputFormat inputFormat = new FixedLengthInputFormat();
  // Each record is exactly 5 bytes (e.g. "one  ", "two  ").
  inputFormat.setRecordLength(job.getConfiguration(), 5);
  ReflectionUtils.setConf(gzip, job.getConfiguration());
  FileInputFormat.setInputPaths(job, workDir);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip, "one two threefour five six seveneightnine ten ");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip, "ten nine eightsevensix five four threetwo one ");
  List splits = inputFormat.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  // Normalize split order so splits[0] is always part1 and splits[1] part2.
  FileSplit firstSplit = (FileSplit) splits.get(0);
  if (firstSplit.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, firstSplit);
  }
  List results = readSplit(inputFormat, splits.get(0), job);
  assertEquals("splits[0] length", 10, results.size());
  assertEquals("splits[0][5]", "six ", results.get(5));
  results = readSplit(inputFormat, splits.get(1), job);
  assertEquals("splits[1] length", 10, results.size());
  assertEquals("splits[1][0]", "ten ", results.get(0));
  assertEquals("splits[1][1]", "nine ", results.get(1));
}
APIUtilityVerifierIterativeVerifierEqualityVerifier
/**
 * Verify IntervalSampler contract, that samples are taken at regular
 * intervals from the given splits.
 */
@Test @SuppressWarnings("unchecked") public void testIntervalSampler() throws Exception {
  final int TOT_SPLITS = 16;
  final int PER_SPLIT_SAMPLE = 4;
  final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
  final double FREQ = 1.0 / TOT_SPLITS;
  InputSampler.Sampler sampler = new InputSampler.IntervalSampler(FREQ, NUM_SAMPLES);
  // Seed each synthetic split with its own index as the initial value.
  int[] splitInits = new int[TOT_SPLITS];
  for (int s = 0; s < TOT_SPLITS; ++s) {
    splitInits[s] = s;
  }
  Job dummyJob = Job.getInstance();
  Object[] samples = sampler.getSample(new TestInputSamplerIF(NUM_SAMPLES, TOT_SPLITS, splitInits), dummyJob);
  assertEquals(NUM_SAMPLES, samples.length);
  // Interval sampling over these inputs must produce each of the values
  // 0..NUM_SAMPLES-1 exactly once.
  Arrays.sort(samples, new IntWritable.Comparator());
  for (int i = 0; i < NUM_SAMPLES; ++i) {
    assertEquals(i, ((IntWritable) samples[i]).get());
  }
}
APIUtilityVerifierIterativeVerifierEqualityVerifier
/**
 * Verify IntervalSampler in mapred.lib.InputSampler, which is added back
 * for binary compatibility of M/R 1.x
 */
@Test(timeout=30000) @SuppressWarnings("unchecked") public void testMapredIntervalSampler() throws Exception {
  final int TOT_SPLITS = 16;
  final int PER_SPLIT_SAMPLE = 4;
  final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
  final double FREQ = 1.0 / TOT_SPLITS;
  // Old mapred-API sampler; must behave like its mapreduce counterpart.
  org.apache.hadoop.mapred.lib.InputSampler.Sampler sampler = new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler(FREQ, NUM_SAMPLES);
  // Seed each synthetic split with its own index as the initial value.
  int[] splitInits = new int[TOT_SPLITS];
  for (int s = 0; s < TOT_SPLITS; ++s) {
    splitInits[s] = s;
  }
  Job dummyJob = Job.getInstance();
  Object[] samples = sampler.getSample(new TestInputSamplerIF(NUM_SAMPLES, TOT_SPLITS, splitInits), dummyJob);
  assertEquals(NUM_SAMPLES, samples.length);
  // Interval sampling over these inputs must produce each of the values
  // 0..NUM_SAMPLES-1 exactly once.
  Arrays.sort(samples, new IntWritable.Comparator());
  for (int i = 0; i < NUM_SAMPLES; ++i) {
    assertEquals(i, ((IntWritable) samples[i]).get());
  }
}
APIUtilityVerifierIterativeVerifierEqualityVerifier
/**
 * Verify SplitSampler contract, that an equal number of records are taken
 * from the first splits.
 */
@Test @SuppressWarnings("unchecked") public void testSplitSampler() throws Exception {
  final int TOT_SPLITS = 15;
  final int NUM_SPLITS = 5;
  final int STEP_SAMPLE = 5;
  final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
  InputSampler.Sampler sampler = new InputSampler.SplitSampler(NUM_SAMPLES, NUM_SPLITS);
  // Split s starts at value s * STEP_SAMPLE, so the first NUM_SPLITS splits
  // together cover the contiguous range 0..NUM_SAMPLES-1.
  int[] splitInits = new int[TOT_SPLITS];
  for (int s = 0; s < TOT_SPLITS; ++s) {
    splitInits[s] = s * STEP_SAMPLE;
  }
  Job dummyJob = Job.getInstance();
  Object[] samples = sampler.getSample(new TestInputSamplerIF(100000, TOT_SPLITS, splitInits), dummyJob);
  assertEquals(NUM_SAMPLES, samples.length);
  // Only the first NUM_SPLITS splits are sampled, so sorting must yield the
  // values 0..NUM_SAMPLES-1 exactly once each.
  Arrays.sort(samples, new IntWritable.Comparator());
  for (int i = 0; i < NUM_SAMPLES; ++i) {
    assertEquals(i, ((IntWritable) samples[i]).get());
  }
}
IterativeVerifierEqualityVerifier
/**
 * Verify SplitSampler contract in mapred.lib.InputSampler, which is added
 * back for binary compatibility of M/R 1.x
 */
@Test(timeout=30000) @SuppressWarnings("unchecked") public void testMapredSplitSampler() throws Exception {
  final int TOT_SPLITS = 15;
  final int NUM_SPLITS = 5;
  final int STEP_SAMPLE = 5;
  final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
  // Old mapred-API sampler; must behave like its mapreduce counterpart.
  org.apache.hadoop.mapred.lib.InputSampler.Sampler sampler = new org.apache.hadoop.mapred.lib.InputSampler.SplitSampler(NUM_SAMPLES, NUM_SPLITS);
  int[] splitInits = new int[TOT_SPLITS];
  for (int s = 0; s < TOT_SPLITS; ++s) {
    splitInits[s] = s * STEP_SAMPLE;
  }
  Object[] samples = sampler.getSample(new TestMapredInputSamplerIF(100000, TOT_SPLITS, splitInits), new JobConf());
  assertEquals(NUM_SAMPLES, samples.length);
  // The mapred sampler interleaves splits differently from the mapreduce one,
  // hence the stride-based expected value below.
  Arrays.sort(samples, new IntWritable.Comparator());
  for (int i = 0; i < NUM_SAMPLES; ++i) {
    assertEquals(i % STEP_SAMPLE + TOT_SPLITS * (i / STEP_SAMPLE), ((IntWritable) samples[i]).get());
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test public void testBinaryTokenFile() throws IOException {
  Configuration clusterConf = mrCluster.getConfig();
  // List the same namenode twice to exercise duplicate-URI handling.
  final String nnUri = dfsCluster.getURI(0).toString();
  clusterConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
  // 1 mapper, 1 reducer, 1ms map/reduce sleep times.
  final String[] jobArgs = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int exitCode = -1;
  try {
    exitCode = ToolRunner.run(clusterConf, new MySleepJob(), jobArgs);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, exitCode);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises the in-memory merge path of the merge manager. With a
 * 10000-byte memory budget, two committed 7950-byte outputs exceed the 90%
 * merge threshold and must trigger an in-memory merge, while a third
 * concurrent reservation must be refused (reserve() returns null). The
 * stubbed manager exposes barriers so the test can step through two full
 * merge cycles deterministically.
 */
@Test(timeout=10000) public void testMemoryMerge() throws Exception {
final int TOTAL_MEM_BYTES=10000;
final int OUTPUT_SIZE=7950;
JobConf conf=new JobConf();
// Configure the memory budget and thresholds: 100% of reduce memory for
// shuffle, 80% single-shuffle limit, merge once 90% of the budget is used.
conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT,1.0f);
conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,TOTAL_MEM_BYTES);
conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,0.8f);
conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT,0.9f);
TestExceptionReporter reporter=new TestExceptionReporter();
// Two-party barriers shared with the stubbed merge thread: the test thread
// rendezvouses with the merger at merge start and merge completion.
CyclicBarrier mergeStart=new CyclicBarrier(2);
CyclicBarrier mergeComplete=new CyclicBarrier(2);
StubbedMergeManager mgr=new StubbedMergeManager(conf,reporter,mergeStart,mergeComplete);
// First cycle: two reservations fit in memory ...
MapOutput out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
InMemoryMapOutput mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
MapOutput out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
InMemoryMapOutput mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
// ... but a third exceeds the budget and must be told to wait (null).
MapOutput out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
// Committing both outputs crosses the 90% merge threshold.
mout1.commit();
mout2.commit();
mergeStart.await();
Assert.assertEquals(1,mgr.getNumMerges());
// Second cycle: identical sequence while the first merge is in flight.
out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
mout1.commit();
mout2.commit();
// Release the first merge, then rendezvous with the start of the second.
mergeComplete.await();
mergeStart.await();
Assert.assertEquals(2,mgr.getNumMerges());
mergeComplete.await();
// Exactly two merges in total, and the reporter saw no exceptions.
Assert.assertEquals(2,mgr.getNumMerges());
Assert.assertEquals("exception reporter invoked",0,reporter.getNumExceptions());
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException {
JobConf jobConf=new JobConf();
final int SORT_FACTOR=5;
jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR);
MapOutputFile mapOutputFile=new MROutputFiles();
FileSystem fs=FileSystem.getLocal(jobConf);
MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile);
MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger");
int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor");
assertEquals(mergeFactor,SORT_FACTOR);
onDiskMerger.suspend();
Random rand=new Random();
for (int i=0; i < 2 * SORT_FACTOR; ++i) {
Path path=new Path("somePath");
CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt());
manager.closeOnDiskFile(cap);
}
LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged");
assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0);
for (int i=0; i < pendingToBeMerged.size(); ++i) {
List inputs=pendingToBeMerged.get(i);
for (int j=1; j < inputs.size(); ++j) {
assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize());
}
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies ShuffleSchedulerImpl.tipFailed(): a failed TIP counts as
 * "complete" for progress accounting, so with two map tasks configured each
 * tipFailed() call advances progress by 0.5, and the scheduler only reports
 * done once all TIPs are accounted for.
 */
@SuppressWarnings("rawtypes") @Test public void testTipFailed() throws Exception {
JobConf job=new JobConf();
job.setNumMapTasks(2);
// Minimal TaskStatus stub overriding only the members visible here; the
// scheduler is handed this in place of a real reduce task status.
TaskStatus status=new TaskStatus(){
@Override public boolean getIsMap(){
return false;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
}
;
Progress progress=new Progress();
TaskAttemptID reduceId=new TaskAttemptID("314159",0,TaskType.REDUCE,0,0);
ShuffleSchedulerImpl scheduler=new ShuffleSchedulerImpl(job,status,reduceId,null,progress,null,null,null);
JobID jobId=new JobID();
// First failed TIP: half of the two tasks accounted for -> progress 0.5,
// and the scheduler is not yet done.
TaskID taskId1=new TaskID(jobId,TaskType.REDUCE,1);
scheduler.tipFailed(taskId1);
Assert.assertEquals("Progress should be 0.5",0.5f,progress.getProgress(),0.0f);
Assert.assertFalse(scheduler.waitUntilDone(1));
// Second failed TIP: everything accounted for -> progress 1.0 and done.
TaskID taskId0=new TaskID(jobId,TaskType.REDUCE,0);
scheduler.tipFailed(taskId0);
Assert.assertEquals("Progress should be 1.0",1.0f,progress.getProgress(),0.0f);
Assert.assertTrue(scheduler.waitUntilDone(1));
}
Class: org.apache.hadoop.mapreduce.tools.TestCLI
InternalCallVerifierEqualityVerifier
/**
 * "-list-attempt-ids" must accept task types MAP (any case) and REDUCE, and
 * both the "running" and "completed" states, returning exit code 0 for each.
 */
@Test public void testListAttemptIdsWithValidInput() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster cluster = mock(Cluster.class);
  Job mockJob = mock(Job.class);
  CLI cli = spy(new CLI());
  // Route the CLI at the mocked cluster/job instead of a live one.
  doReturn(cluster).when(cli).createCluster();
  when(mockJob.getTaskReports(TaskType.MAP)).thenReturn(getTaskReports(jobId, TaskType.MAP));
  when(mockJob.getTaskReports(TaskType.REDUCE)).thenReturn(getTaskReports(jobId, TaskType.REDUCE));
  when(cluster.getJob(jobId)).thenReturn(mockJob);
  int mapExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "MAP", "running"});
  int lowerMapExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "map", "running"});
  int reduceExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "REDUCE", "running"});
  int completedExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "REDUCE", "completed"});
  assertEquals("MAP is a valid input,exit code should be 0", 0, mapExit);
  assertEquals("map is a valid input,exit code should be 0", 0, lowerMapExit);
  assertEquals("REDUCE is a valid input,exit code should be 0", 0, reduceExit);
  assertEquals("REDUCE and completed are a valid inputs to -list-attempt-ids,exit code should be 0", 0, completedExit);
  // "MAP" and "map" both fetch the MAP reports; the two REDUCE invocations
  // both fetch the REDUCE reports.
  verify(mockJob, times(2)).getTaskReports(TaskType.MAP);
  verify(mockJob, times(2)).getTaskReports(TaskType.REDUCE);
}
InternalCallVerifierEqualityVerifier
/**
 * "-list-attempt-ids" must reject the internal JOB_SETUP/JOB_CLEANUP task
 * types and unknown task states, returning exit code -1 for each.
 */
@Test public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster cluster = mock(Cluster.class);
  Job mockJob = mock(Job.class);
  CLI cli = spy(new CLI());
  // Route the CLI at the mocked cluster/job instead of a live one.
  doReturn(cluster).when(cli).createCluster();
  when(cluster.getJob(jobId)).thenReturn(mockJob);
  int setupExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "JOB_SETUP", "running"});
  int cleanupExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "JOB_CLEANUP", "running"});
  // "complete" is not a recognized state ("completed" is).
  int badStateExit = cli.run(new String[]{"-list-attempt-ids", jobIdStr, "REDUCE", "complete"});
  assertEquals("JOB_SETUP is an invalid input,exit code should be -1", -1, setupExit);
  assertEquals("JOB_CLEANUP is an invalid input,exit code should be -1", -1, cleanupExit);
  assertEquals("complete is an invalid input,exit code should be -1", -1, badStateExit);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * To ensure nothing broken after we removed normalization
 * from the MRAM side
 * @throws Exception
 */
@Test public void testJobWithNonNormalizedCapabilities() throws Exception {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  JobConf jobConf = new JobConf(mrCluster.getConfig());
  // Request memory sizes that are not multiples of the scheduler minimum.
  // NOTE(review): one key uses the new-style name, the other the deprecated
  // "mapred." prefix -- presumably intentional, but worth confirming.
  jobConf.setInt("mapreduce.map.memory.mb", 700);
  jobConf.setInt("mapred.reduce.memory.mb", 1500);
  SleepJob sleeper = new SleepJob();
  sleeper.setConf(jobConf);
  Job job = sleeper.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR);
  job.submit();
  boolean completed = job.waitForCompletion(true);
  Assert.assertTrue("Job should be completed", completed);
  Assert.assertEquals("Job should be finished successfully", JobStatus.State.SUCCEEDED, job.getJobState());
}
Class: org.apache.hadoop.mapreduce.v2.TestMRJobs
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Runs a small sleep job against the mini cluster (with the AM address set
 * to "local") and verifies success state, tracking-URL shape, counters and
 * task progress.
 */
@Test(timeout=300000) public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testSleepJob().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  Configuration sleepJobConf = new Configuration(mrCluster.getConfig());
  sleepJobConf.set(MRConfig.MASTER_ADDRESS, "local");
  SleepJob sleeper = new SleepJob();
  sleeper.setConf(sleepJobConf);
  // Reducer count is overridable for stress runs; defaults to 2.
  int numReduces = sleepJobConf.getInt("TestMRJobs.testSleepJob.reduces", 2);
  Job job = sleeper.createJob(3, numReduces, 10000, 1, 5000, 1);
  job.addFileToClassPath(APP_JAR);
  job.setJarByClass(SleepJob.class);
  job.setMaxMapAttempts(1);
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  // The tracking URL must end with the numeric tail of the job id plus "/".
  Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId, trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(job);
  verifyTaskProgress(job);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Runs the random-text-writer job (3 maps x 1024 bytes) and verifies that
 * exactly three part files are produced alongside the success marker, plus
 * tracking-URL shape and counters.
 */
@Test(timeout=60000) public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testRandomWriter().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  RandomTextWriterJob writerJob = new RandomTextWriterJob();
  // 3072 total bytes / 1024 per map => 3 map tasks, hence 3 part files.
  mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
  mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
  Job job = writerJob.createJob(mrCluster.getConfig());
  Path outDir = new Path(OUTPUT_ROOT_DIR, "random-output");
  FileOutputFormat.setOutputPath(job, outDir);
  job.setSpeculativeExecution(false);
  job.addFileToClassPath(APP_JAR);
  job.setJarByClass(RandomTextWriterJob.class);
  job.setMaxMapAttempts(1);
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId, trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  // Count output files, excluding the _SUCCESS marker.
  RemoteIterator iterator = FileContext.getFileContext(mrCluster.getConfig()).listStatus(outDir);
  int numPartFiles = 0;
  while (iterator.hasNext()) {
    FileStatus file = iterator.next();
    if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
      numPartFiles++;
    }
  }
  Assert.assertEquals("Number of part files is wrong!", 3, numPartFiles);
  verifyRandomWriterCounters(job);
}
IterativeVerifierBranchVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Runs a chatty sleep job with small rolling-log limits configured for both
 * the AM (7 KB, 7 backups) and the map task (4 KB, 3 backups), waits for the
 * application to reach a terminal RM state, then scans every node manager's
 * log directories and asserts that each container produced the configured
 * number of rolled syslog files of at least the configured size.
 */
@Test(timeout=120000) public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final SleepJob sleepJob = new SleepJob();
  final JobConf sleepConf = new JobConf(mrCluster.getConfig());
  // Verbose logging plus tiny log limits force the appenders to roll.
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
  final long userLogKb = 4;
  sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
  sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
  final long amLogKb = 7;
  sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
  sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
  sleepJob.setConf(sleepConf);
  // Single map (100 iterations of 1ms), no reduces.
  final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  final JobId jobId = TypeConverter.toYarn(job.getJobID());
  final ApplicationId appID = jobId.getAppId();
  // Poll (up to 60s) until the RM reports a terminal application state; logs
  // are only reliably complete once the app has terminated.
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;
    if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED, mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
  // Build a glob matching every container's syslog under this application.
  final String appIdStr = appID.toString();
  final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
  final String containerGlob = "container_" + appIdSuffix + "_*_*";
  final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob + Path.SEPARATOR + TaskLog.LogName.SYSLOG;
  int numAppMasters = 0;
  int numMapTasks = 0;
  for (int i = 0; i < NUM_NODE_MGRS; i++) {
    final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
    for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
      final Path absSyslogGlob = new Path(logDir + Path.SEPARATOR + syslogGlob);
      LOG.info("Checking for glob: " + absSyslogGlob);
      final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
      for (FileStatus slog : syslogs) {
        // In uber mode the AM and the map share one container; otherwise the
        // AM is container #1.
        boolean foundAppMaster = job.isUber();
        final Path containerPathComponent = slog.getPath().getParent();
        if (!foundAppMaster) {
          final ContainerId cid = ConverterUtils.toContainerId(containerPathComponent.getName());
          foundAppMaster = (cid.getId() == 1);
        }
        // syslog plus its rolled backups (syslog.1, syslog.2, ...).
        final FileStatus[] sysSiblings = localFs.globStatus(new Path(containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
        Arrays.sort(sysSiblings);
        if (foundAppMaster) {
          numAppMasters++;
        } else {
          numMapTasks++;
        }
        // Fixed: these used Assert.assertSame on autoboxed ints, which
        // compares object identity and only "worked" via the Integer cache;
        // assertEquals compares values. (Also fixed "sylog" typo.)
        if (foundAppMaster) {
          Assert.assertEquals("Unexpected number of AM syslog* files", sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1, sysSiblings.length);
          Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb, sysSiblings[1].getLen() >= amLogKb * 1024);
        } else {
          Assert.assertEquals("Unexpected number of MR task syslog* files", sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1, sysSiblings.length);
          Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb, sysSiblings[1].getLen() >= userLogKb * 1024);
        }
      }
    }
  }
  // Exactly one AM container; exactly one map container unless uberized.
  Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
  if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
    Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
  } else {
    Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
  }
}
InternalCallVerifierEqualityVerifier
/**
 * Runs a job whose mapper always fails, prints the diagnostics of both map
 * attempts, and verifies the completion events (attempt FAILED, then TIP
 * FAILED), the final job state and the failure counters.
 */
@Test(timeout=60000) public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testFailingMapper().");
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  Job job = runFailingMapperJob();
  TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
  // Dump diagnostics for both attempts of the single failing map task.
  for (int attempt = 0; attempt <= 1; attempt++) {
    TaskAttemptID aId = new TaskAttemptID(taskID, attempt);
    System.out.println("Diagnostics for " + aId + " :");
    for (String diag : job.getTaskDiagnostics(aId)) {
      System.out.println(diag);
    }
  }
  TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
  Assert.assertEquals(TaskCompletionEvent.Status.FAILED, events[0].getStatus());
  Assert.assertEquals(TaskCompletionEvent.Status.TIPFAILED, events[1].getStatus());
  Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
  verifyFailingMapperCounters(job);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * With a single map attempt allowed, a container that completes without
 * ever being used by its assigned attempt must fail the attempt and, with
 * no retries left, the whole job.
 */
@Test public void testTaskFailWithUnusedContainer() throws Exception {
  MRApp app = new MRAppWithFailingTaskAndUnusedContainer();
  Configuration conf = new Configuration();
  int maxAttempts = 1;
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Map taskMap = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, taskMap.size());
  Task onlyTask = taskMap.values().iterator().next();
  app.waitForState(onlyTask, TaskState.SCHEDULED);
  Map attemptMap = taskMap.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts, attemptMap.size());
  TaskAttempt onlyAttempt = attemptMap.values().iterator().next();
  // Let the attempt reach ASSIGNED, then report its container completed
  // before the attempt ever ran in it.
  app.waitForInternalState((TaskAttemptImpl) onlyAttempt, TaskAttemptStateInternal.ASSIGNED);
  app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(onlyAttempt.getID(), TaskAttemptEventType.TA_CONTAINER_COMPLETED));
  app.waitForState(job, JobState.FAILED);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * A task whose first attempt fails must be retried: the job and task end
 * SUCCEEDED, with the first attempt FAILED and the second SUCCEEDED.
 */
@Test public void testFailTask() throws Exception {
  MRApp app = new MockFirstFailingAttemptMRApp(1, 0);
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.SUCCEEDED);
  Map taskMap = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, taskMap.size());
  Task onlyTask = taskMap.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, onlyTask.getReport().getTaskState());
  Map attemptMap = taskMap.values().iterator().next().getAttempts();
  Assert.assertEquals("Num attempts is not correct", 2, attemptMap.size());
  // Iteration order: first attempt (failed), then the successful retry.
  Iterator it = attemptMap.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED, it.next().getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED, it.next().getReport().getTaskAttemptState());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * When every attempt of a task times out, the task is retried up to
 * MAP_MAX_ATTEMPTS times; all attempts end FAILED and so do the task and
 * the job.
 */
@Test public void testTimedOutTask() throws Exception {
  final int maxAttempts = 2;
  MRApp app = new TimeOutTaskMRApp(1, 0);
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.FAILED);
  Map tasks = job.getTasks();
  Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
  Task timedOutTask = (Task) tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.FAILED,
      timedOutTask.getReport().getTaskState());
  Map attempts = timedOutTask.getAttempts();
  Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts.size());
  // Every attempt timed out, so each one must have ended FAILED.
  for (Object o : attempts.values()) {
    TaskAttempt attempt = (TaskAttempt) o;
    Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
        attempt.getReport().getTaskAttemptState());
  }
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Fetch-failure handling when three reducers depend on one map's output.
 * Two fetch failures while all three reducers are still in the SHUFFLE
 * phase leave the map SUCCEEDED; after two reducers progress to the REDUCE
 * phase, a third fetch failure fails the map attempt and re-runs the map.
 * Finally the completion-event log is checked for the OBSOLETE / FAILED /
 * SUCCEEDED sequence and the map-only event view is checked against the
 * converted full event list.
 */
@Test public void testFetchFailureMultipleReduces() throws Exception {
// 1 map, 3 reduces; the mock app auto-completes assigned containers.
MRApp app=new MRApp(1,3,false,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",4,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
Task reduceTask2=it.next();
Task reduceTask3=it.next();
// Run the single map attempt to completion.
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
// Bring all three reducers up and put each in the SHUFFLE phase.
app.waitForState(reduceTask,TaskState.RUNNING);
app.waitForState(reduceTask2,TaskState.RUNNING);
app.waitForState(reduceTask3,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt,Phase.SHUFFLE);
TaskAttempt reduceAttempt2=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt2,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt2,Phase.SHUFFLE);
TaskAttempt reduceAttempt3=reduceTask3.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt3,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt3,Phase.SHUFFLE);
// Two fetch failures while all reducers are still shuffling do not
// fail the map.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
assertEquals(TaskState.SUCCEEDED,mapTask.getState());
// Once two reducers move past SHUFFLE, the third fetch failure causes
// the map to be re-run.
updateStatus(app,reduceAttempt2,Phase.REDUCE);
updateStatus(app,reduceAttempt3,Phase.REDUCE);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
Assert.assertEquals("Map TaskAttempt state not correct",TaskAttemptState.FAILED,mapAttempt1.getState());
Assert.assertEquals("Num attempts in Map Task not correct",2,mapTask.getAttempts().size());
// Skip the failed first attempt; complete the replacement attempt.
Iterator atIt=mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2=atIt.next();
app.waitForState(mapAttempt2,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
// Finish all reducers and the job.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt3.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
// The event fetched earlier now reports OBSOLETE: the old map success
// was invalidated in place when the map was re-run.
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",6,events.length);
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt2.getID(),events[2].getAttemptId());
Assert.assertEquals("Event reduce attempt id not correct",reduceAttempt.getID(),events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.FAILED,events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",TaskAttemptCompletionEventStatus.SUCCEEDED,events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",TaskAttemptCompletionEventStatus.SUCCEEDED,events[3].getStatus());
// The map-only completion-event view must agree with the converted
// full event list, both within and past the requested window.
TaskCompletionEvent mapEvents[]=job.getMapAttemptCompletionEvents(0,2);
TaskCompletionEvent convertedEvents[]=TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events",2,mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",Arrays.copyOfRange(convertedEvents,0,2),mapEvents);
mapEvents=job.getMapAttemptCompletionEvents(2,200);
Assert.assertEquals("Incorrect number of map events",1,mapEvents.length);
Assert.assertEquals("Unexpected map event",convertedEvents[2],mapEvents[0]);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Fetch-failure handling with a single map and reduce. After the map
 * succeeds, three fetch failures reported by the reducer mark the map
 * attempt FAILED, obsolete its completion event, and re-run the map. The
 * final completion-event log must read OBSOLETE (old map attempt), FAILED
 * (old map attempt), SUCCEEDED (new map attempt), SUCCEEDED (reduce), and
 * the map-only event view must match the converted full event list.
 */
@Test public void testFetchFailure() throws Exception {
  // 1 map, 1 reduce; the mock app auto-completes assigned containers.
  MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
  Iterator it = job.getTasks().values().iterator();
  Task mapTask = (Task) it.next();
  Task reduceTask = (Task) it.next();
  // Run the map attempt to completion.
  app.waitForState(mapTask, TaskState.RUNNING);
  TaskAttempt mapAttempt1 = (TaskAttempt) mapTask.getAttempts().values().iterator().next();
  app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 1, events.length);
  Assert.assertEquals("Event status not correct",
      TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
  // Report three fetch failures from the reducer against the map attempt.
  app.waitForState(reduceTask, TaskState.RUNNING);
  TaskAttempt reduceAttempt = (TaskAttempt) reduceTask.getAttempts().values().iterator().next();
  app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
  sendFetchFailure(app, reduceAttempt, mapAttempt1);
  sendFetchFailure(app, reduceAttempt, mapAttempt1);
  sendFetchFailure(app, reduceAttempt, mapAttempt1);
  // The map is re-run: the old attempt is FAILED and a second one starts.
  app.waitForState(mapTask, TaskState.RUNNING);
  Assert.assertEquals("Map TaskAttempt state not correct",
      TaskAttemptState.FAILED, mapAttempt1.getState());
  Assert.assertEquals("Num attempts in Map Task not correct", 2,
      mapTask.getAttempts().size());
  Iterator atIt = mapTask.getAttempts().values().iterator();
  atIt.next(); // skip the failed first attempt
  TaskAttempt mapAttempt2 = (TaskAttempt) atIt.next();
  app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(mapTask, TaskState.SUCCEEDED);
  // Finish the reducer and the job.
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
  // The event fetched earlier now reports OBSOLETE: the old map success
  // was invalidated in place when the map was re-run.
  Assert.assertEquals("Event status not correct",
      TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
  events = job.getTaskAttemptCompletionEvents(0, 100);
  Assert.assertEquals("Num completion events not correct", 4, events.length);
  Assert.assertEquals("Event map attempt id not correct",
      mapAttempt1.getID(), events[0].getAttemptId());
  Assert.assertEquals("Event map attempt id not correct",
      mapAttempt1.getID(), events[1].getAttemptId());
  Assert.assertEquals("Event map attempt id not correct",
      mapAttempt2.getID(), events[2].getAttemptId());
  // Fixed typo in the assertion message ("redude" -> "reduce").
  Assert.assertEquals("Event reduce attempt id not correct",
      reduceAttempt.getID(), events[3].getAttemptId());
  Assert.assertEquals("Event status not correct for map attempt1",
      TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
  Assert.assertEquals("Event status not correct for map attempt1",
      TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
  Assert.assertEquals("Event status not correct for map attempt2",
      TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
  Assert.assertEquals("Event status not correct for reduce attempt1",
      TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
  // The map-only completion-event view must agree with the converted
  // full event list, both within and past the requested window.
  TaskCompletionEvent[] mapEvents = job.getMapAttemptCompletionEvents(0, 2);
  TaskCompletionEvent[] convertedEvents = TypeConverter.fromYarn(events);
  Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
  Assert.assertArrayEquals("Unexpected map events",
      Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
  mapEvents = job.getMapAttemptCompletionEvents(2, 200);
  Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
  Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
}
InternalCallVerifierEqualityVerifier
/**
 * This tests that if a map attempt was failed (say due to fetch failures),
 * then it gets re-run. When the next map attempt is running, if the AM dies,
 * then, on AM re-run, the AM does not incorrectly remember the first failed
 * attempt. Currently recovery does not recover running tasks. Effectively,
 * the AM re-runs the maps from scratch.
 */
@Test public void testFetchFailureWithRecovery() throws Exception {
int runCount=0;
// First AM generation: 1 map, 1 reduce, with job history enabled.
MRApp app=new MRAppWithHistory(1,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
// Run the map attempt to completion.
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
app.waitForState(reduceTask,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
// Three fetch failures from the reducer fail the map attempt and put
// the map back into RUNNING with a fresh attempt.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
// Kill the AM while the re-run map attempt is still in flight.
app.stop();
// Second AM generation with recovery enabled. The map was running when
// the AM died, so it is not recovered and runs again from scratch.
app=new MRAppWithHistory(1,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask=it.next();
reduceTask=it.next();
app.waitForState(mapTask,TaskState.RUNNING);
mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
// Exactly one map and one reduce success: no stale event from the
// first generation's failed attempt was carried over.
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",2,events.length);
}
BooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises job-end notification retry configuration against an
 * unreachable URL: first with at most one attempt (exactly one try, at
 * least 5s elapsed), then with three attempts spaced 3s apart (three
 * tries, at least 9s elapsed).
 */
@Test public void testNotifyRetries() throws InterruptedException {
  JobConf conf = new JobConf();
  // One attempt total, no retries; 5s interval.
  conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "0");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
  conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "5000");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5000");
  JobReport report = mock(JobReport.class);
  long begin = System.currentTimeMillis();
  this.notificationCount = 0;
  this.setConf(conf);
  this.notify(report);
  long finish = System.currentTimeMillis();
  Assert.assertEquals("Only 1 try was expected but was : " + this.notificationCount, 1, this.notificationCount);
  Assert.assertTrue("Should have taken more than 5 seconds it took " + (finish - begin), finish - begin > 5000);
  // Reconfigure: three attempts, 3s apart.
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "3");
  conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "3");
  conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "3000");
  conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "3000");
  begin = System.currentTimeMillis();
  this.notificationCount = 0;
  this.setConf(conf);
  this.notify(report);
  finish = System.currentTimeMillis();
  Assert.assertEquals("Only 3 retries were expected but was : " + this.notificationCount, 3, this.notificationCount);
  Assert.assertTrue("Should have taken more than 9 seconds it took " + (finish - begin), finish - begin > 9000);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Killing one attempt of a task must not fail the task: a replacement
 * attempt runs, both tasks and the job finish SUCCEEDED, and the killed
 * attempt is recorded as KILLED.
 */
@Test public void testKillTaskAttempt() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  MRApp app = new BlockingMRApp(2, 0, latch);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Map tasks = job.getTasks();
  Assert.assertEquals("No of tasks is not correct", 2, tasks.size());
  Iterator taskIter = tasks.values().iterator();
  Task firstTask = (Task) taskIter.next();
  Task secondTask = (Task) taskIter.next();
  app.waitForState(firstTask, TaskState.SCHEDULED);
  app.waitForState(secondTask, TaskState.SCHEDULED);
  // Kill the first task's only attempt while the app is still blocked.
  TaskAttempt victim = (TaskAttempt) firstTask.getAttempts().values().iterator().next();
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(victim.getID(), TaskAttemptEventType.TA_KILL));
  // Unblock the app and let the job run to completion.
  latch.countDown();
  app.waitForState(job, JobState.SUCCEEDED);
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      firstTask.getReport().getTaskState());
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      secondTask.getReport().getTaskState());
  // The first task needed a second attempt after the kill.
  Map attempts = firstTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 2, attempts.size());
  Iterator attemptIter = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
      ((TaskAttempt) attemptIter.next()).getReport().getTaskAttemptState());
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      ((TaskAttempt) attemptIter.next()).getReport().getTaskAttemptState());
  // The second task ran once, untouched by the kill.
  attempts = secondTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attempts.size());
  attemptIter = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      ((TaskAttempt) attemptIter.next()).getReport().getTaskAttemptState());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Sending JOB_KILL while the job runs must transition the job, its single
 * task, and that task's only attempt to KILLED.
 */
@Test public void testKillJob() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  MRApp app = new BlockingMRApp(1, 0, latch);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  // Issue the kill, then release the blocked app so it can process it.
  app.getContext().getEventHandler().handle(
      new JobEvent(job.getID(), JobEventType.JOB_KILL));
  latch.countDown();
  app.waitForState(job, JobState.KILLED);
  Map tasks = job.getTasks();
  Assert.assertEquals("No of tasks is not correct", 1, tasks.size());
  Task onlyTask = (Task) tasks.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.KILLED,
      onlyTask.getReport().getTaskState());
  Map attempts = onlyTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attempts.size());
  Iterator it = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
      ((TaskAttempt) it.next()).getReport().getTaskAttemptState());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Killing one of two tasks must leave that task KILLED while the other
 * task — and the job as a whole — still finishes SUCCEEDED.
 */
@Test public void testKillTask() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  MRApp app = new BlockingMRApp(2, 0, latch);
  Job job = app.submit(new Configuration());
  app.waitForState(job, JobState.RUNNING);
  Map tasks = job.getTasks();
  Assert.assertEquals("No of tasks is not correct", 2, tasks.size());
  Iterator taskIter = tasks.values().iterator();
  Task killedTask = (Task) taskIter.next();
  Task survivingTask = (Task) taskIter.next();
  // Kill the first task, then unblock the app.
  app.getContext().getEventHandler().handle(
      new TaskEvent(killedTask.getID(), TaskEventType.T_KILL));
  latch.countDown();
  app.waitForState(job, JobState.SUCCEEDED);
  Assert.assertEquals("Task state not correct", TaskState.KILLED,
      killedTask.getReport().getTaskState());
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED,
      survivingTask.getReport().getTaskState());
  // Each task ran exactly one attempt: KILLED and SUCCEEDED respectively.
  Map attempts = killedTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attempts.size());
  Iterator iter = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED,
      ((TaskAttempt) iter.next()).getReport().getTaskAttemptState());
  attempts = survivingTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attempts.size());
  iter = attempts.values().iterator();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED,
      ((TaskAttempt) iter.next()).getReport().getTaskAttemptState());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The test verifies that the AM re-runs maps that have run on bad nodes. It
 * also verifies that the AM records all success/killed events so that reduces
 * are notified about map output status changes. It also verifies that the
 * re-run information is preserved across AM restart
 */
@Test public void testUpdatedNodes() throws Exception {
int runCount=0;
// First AM generation: 2 maps, 2 reduces, with job history enabled.
MRApp app=new MRAppWithHistory(2,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,0.5f);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",4,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
// Both map attempts must land on the same node so that one unhealthy
// node report affects both.
NodeId node1=task1Attempt.getNodeId();
NodeId node2=task2Attempt.getNodeId();
Assert.assertEquals(node1,node2);
// Complete both maps successfully.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 completion events for success",2,events.length);
// Report the shared node as UNHEALTHY: both succeeded attempts must be
// killed and the maps re-run.
ArrayList updatedNodes=new ArrayList();
NodeReport nr=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(NodeReport.class);
nr.setNodeId(node1);
nr.setNodeState(NodeState.UNHEALTHY);
updatedNodes.add(nr);
app.getContext().getEventHandler().handle(new JobUpdatedNodesEvent(job.getID(),updatedNodes));
app.waitForState(task1Attempt,TaskAttemptState.KILLED);
app.waitForState(task2Attempt,TaskAttemptState.KILLED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 more completion events for killed",4,events.length);
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Complete only map1's second attempt; map2 is left running so its
// re-run state must survive the AM restart below.
Iterator itr=mapTask1.getAttempts().values().iterator();
itr.next();
task1Attempt=itr.next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 1 more completion events for success",5,events.length);
// Crash the AM and restart a second generation with recovery enabled.
app.stop();
app=new MRAppWithHistory(2,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",4,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
Task reduceTask1=it.next();
Task reduceTask2=it.next();
// map1's success is recovered; map2 was running at crash time, so it
// re-runs in this generation.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 completion events for killed & success of map1",2,events.length);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 1 more completion events for success",3,events.length);
// Finish the reduces. The TA_KILL after reduce1's success must be a
// no-op: the task stays SUCCEEDED.
app.waitForState(reduceTask1,TaskState.RUNNING);
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt task3Attempt=reduceTask1.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
TaskAttempt task4Attempt=reduceTask2.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task4Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 more completion events for reduce success",5,events.length);
app.waitForState(job,JobState.SUCCEEDED);
}
InternalCallVerifierEqualityVerifier
/**
 * This test case primarily verifies if the recovery is controlled through config
 * property. In this case, recover is turned ON. AM with 3 maps and 0 reduce.
 * AM crashes after the first two tasks finishes and recovers completely and
 * succeeds in the second generation.
 * @throws Exception
 */
@Test public void testRecoverySuccessUsingCustomOutputCommitter() throws Exception {
int runCount=0;
// First AM generation: 3 maps, 0 reduces, custom committer that allows
// recovery via the "want.am.recovery" flag.
MRApp app=new MRAppWithHistory(3,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean("want.am.recovery",true);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task mapTask3=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Finish the first two maps, then crash the AM with map3 still running.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.stop();
// Second AM generation with recovery wanted.
// NOTE(review): constructed with (2 maps, 1 reduce) while NUM_REDUCES is
// forced to 0 below; recovery restores the original 3 map tasks, as the
// assertions confirm — verify the constructor args are intentional.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.setBoolean("want.am.recovery",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setInt(MRJobConfig.NUM_REDUCES,0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
mapTask3=it.next();
// The two completed maps are recovered as SUCCEEDED; only map3 re-runs.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(mapTask3,TaskState.RUNNING);
task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask3,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Starts an AM with 2 maps and 1 reduce, adds a speculative attempt for
 * the first map, and completes the first map via its original attempt.
 * The AM is then stopped and restarted with recovery enabled; the test
 * verifies that the first map's success — including its start and finish
 * times — is recovered, that the remaining tasks complete, and that
 * AMInfos records both AM attempts with start times falling inside the
 * observed wall-clock windows.
 * @throws Exception
 */
@Test public void testSpeculative() throws Exception {
int runCount=0;
long am1StartTimeEst=System.currentTimeMillis();
// First AM generation: 2 maps, 1 reduce, with job history enabled.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Ask for a speculative attempt on map1 and poll (up to ~10s) until the
// second attempt appears.
app.getContext().getEventHandler().handle(new TaskEvent(mapTask1.getID(),TaskEventType.T_ADD_SPEC_ATTEMPT));
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(1000);
LOG.info("Waiting for next attempt to start");
}
Iterator t1it=mapTask1.getAttempts().values().iterator();
TaskAttempt task1Attempt1=t1it.next();
TaskAttempt task1Attempt2=t1it.next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
ContainerId t1a2contId=task1Attempt2.getAssignedContainerID();
LOG.info(t1a2contId.toString());
LOG.info(task1Attempt1.getID().toString());
LOG.info(task1Attempt2.getID().toString());
// Manually mark the speculative attempt's container as launched so it
// reaches RUNNING alongside the original attempt.
app.getContext().getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(),runCount));
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task1Attempt2,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Finish map1 via its original attempt and remember the reported task
// times so recovery can be checked against them later.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(task1Attempt1,TaskAttemptState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.SUCCEEDED);
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
// Crash the AM and start a second generation with recovery enabled.
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// map1's success is recovered; map2 and the reduce are re-run and
// completed in this generation.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovered timestamps must match what the first generation reported.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
// Both AM generations must be recorded, numbered 1 and 2, with the
// expected NM host/ports.
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Each AM's recorded start time must fall within the wall-clock window
// observed around its launch.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
InternalCallVerifierEqualityVerifier
/**
 * Verifies that AM task recovery can be switched off through the
 * "want.am.recovery" config property. The first AM (3 maps, 0 reduces)
 * is stopped after two maps finish; because recovery is disabled, the
 * second generation must re-run every task from scratch and succeed.
 * @throws Exception
 */
@Test public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(3,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
// Recovery is explicitly disabled for this job.
conf.setBoolean("want.am.recovery",false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
// Typed iterator (was raw): getTasks().values() yields Task instances,
// so a raw Iterator would not compile against the Task declarations below.
Iterator<Task> it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task mapTask3=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Finish the first two maps, then kill the first-generation AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.stop();
// Second generation: recovery stays disabled, so all three maps must
// run again. NOTE(review): the second app is constructed with (2,1)
// maps/reduces while NUM_REDUCES is forced to 0 below and 3 tasks are
// still expected — presumably the recovered job spec wins; confirm
// against MRAppWithHistory semantics.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.setBoolean("want.am.recovery",false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setInt(MRJobConfig.NUM_REDUCES,0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
mapTask3=it.next();
// All three maps must be re-running — nothing was recovered.
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask3,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Verifies the exact container launch command and the default container
// environment produced for a map task when cross-platform submission is on.
@Test(timeout=30000) public void testCommandLine() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// The full expected java command line, byte-for-byte, including JVM opts,
// log4j settings and YarnChild arguments captured by MyMRApp.
Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java"+ " -Djava.net.preferIPv4Stack=true"+ " -Dhadoop.metrics.log.level=WARN"+ " -Xmx200m -Djava.io.tmpdir="+ MRApps.crossPlatformify("PWD")+ "/tmp"+ " -Dlog4j.configuration=container-log4j.properties"+ " -Dyarn.app.container.log.dir="+ " -Dyarn.app.container.log.filesize=0"+ " -Dhadoop.root.logger=INFO,CLA"+ " org.apache.hadoop.mapred.YarnChild 127.0.0.1"+ " 54321"+ " attempt_0_0000_m_000000_0"+ " 0"+ " 1>/stdout"+ " 2>/stderr ]",app.myCommandLine);
// With no user overrides, the defaults must be present in the environment:
// INFO,console root logger and an empty HADOOP_CLIENT_OPTS.
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("INFO,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Checks that user-supplied task environment settings reach the container
// environment, and that user overrides beat the framework defaults.
@Test public void testEnvironmentVariables() throws Exception {
// First run: explicit HADOOP_CLIENT_OPTS plus a WARN map log level.
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_CLIENT_OPTS=test");
conf.setStrings(MRJobConfig.MAP_LOG_LEVEL,"WARN");
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("WARN,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("test",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
// Second run: a user-supplied HADOOP_ROOT_LOGGER must win over the default.
MyMRApp secondApp=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration secondConf=new Configuration();
secondConf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_ROOT_LOGGER=trace");
Job secondJob=secondApp.submit(secondConf);
secondApp.waitForState(secondJob,JobState.SUCCEEDED);
secondApp.verifyCompleted();
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",secondApp.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("trace",secondApp.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierEqualityVerifier
// Exercises AM node blacklisting with the "ignore blacklisting" threshold:
// once more than 33% of known nodes are blacklisted, blacklisting is
// ignored; registering more nodes drops the ratio and re-enables it.
// Expected counters for each step are passed into getContainerOnHost
// (hostBlacklisted, releases, ignoreBlacklistingCount-ish args — see
// that helper for the exact meaning; defined elsewhere in this file).
@Test public void testIgnoreBlacklisting() throws Exception {
LOG.info("Running testIgnoreBlacklisting");
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true);
// One failure is enough to blacklist a node.
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1);
// Ignore blacklisting when >33% of the cluster is blacklisted.
conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,33);
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM[] nodeManagers=new MockNM[10];
int nmNum=0;
List assigned=null;
// Start with a single node (h1) so one blacklisted node exceeds 33%.
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
nodeManagers[0].nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
assigned=getContainerOnHost(jobId,1,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
LOG.info("Failing container _1 on H1 (Node should be blacklisted and" + " ignore blacklisting enabled");
// h1 gets blacklisted (1 of 1 nodes = 100% > 33%), so blacklisting is
// ignored and h1 can still be assigned after the initial miss.
ContainerFailedEvent f1=createFailEvent(jobId,1,"h1",false);
allocator.sendFailure(f1);
assigned=getContainerOnHost(jobId,2,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,1,0,0,1,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
assigned=getContainerOnHost(jobId,2,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// Add h2 and h3; 1 of 3 nodes blacklisted is still over the 33% line,
// so h1 remains usable.
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,3,1024,new String[]{"h2"},nodeManagers[1],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,4,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
assigned=getContainerOnHost(jobId,5,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// Adding h4 drops the ratio to 1/4 = 25% < 33%: blacklisting kicks
// back in and h1 must no longer receive assignments.
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,6,1024,new String[]{"h4"},nodeManagers[3],dispatcher,allocator,0,0,1,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
assigned=getContainerOnHost(jobId,7,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// Failing h2 blacklists a second node: 2/4 = 50% > 33%, so ignore
// blacklisting is re-enabled and both pending requests get assigned.
ContainerFailedEvent f2=createFailEvent(jobId,3,"h2",false);
allocator.sendFailure(f2);
assigned=getContainerOnHost(jobId,8,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,1,0,0,2,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
assigned=getContainerOnHost(jobId,8,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 2",2,assigned.size());
assigned=getContainerOnHost(jobId,9,1024,new String[]{"h2"},nodeManagers[1],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// h3 also fails while blacklisting is being ignored — still assignable.
ContainerFailedEvent f3=createFailEvent(jobId,4,"h3",false);
allocator.sendFailure(f3);
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,10,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
// Register five more nodes (h5..h9): 3 blacklisted out of 10 = 30%,
// which is below the threshold on the last registration, so
// blacklisting becomes effective again.
for (int i=0; i < 5; i++) {
nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher);
assigned=getContainerOnHost(jobId,11 + i,1024,new String[]{String.valueOf(5 + i)},nodeManagers[4 + i],dispatcher,allocator,0,0,(i == 4 ? 3 : 0),0,rm);
Assert.assertEquals("No of assignments must be 1",1,assigned.size());
}
// h3 is blacklisted and blacklisting is honored again: no assignment.
assigned=getContainerOnHost(jobId,20,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'singleJobCounter'. Should set SingleCounterPage class for rendering
 */
@Test public void testGetSingleJobCounter() throws IOException {
appController.singleJobCounter();
// After the call the controller must have chosen the single-counter page.
Class<?> rendered=appController.getClazz();
assertEquals(SingleCounterPage.class,rendered);
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test method 'singleTaskCounter'. Should set SingleCounterPage class for rendering
 */
@Test public void testGetSingleTaskCounter() throws IOException {
appController.singleTaskCounter();
// The single-counter page is selected, and both the counter group and
// counter name properties must have been populated by the controller.
Class<?> rendered=appController.getClazz();
assertEquals(SingleCounterPage.class,rendered);
assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'job'. Should print message about error or set JobPage class for rendering
 */
@Test public void testGetJob(){
final String accessDenied="Access denied: User user does not have permission to view job job_01_01";
// Without view permission the controller writes a plain-text denial.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.job();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(accessDenied,appController.getData());
// With permission but no job id, a bad-request message is appended
// to the previously written data.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.JOB_ID);
appController.job();
assertEquals(accessDenied + "Bad Request: Missing job ID",appController.getData());
// With permission and a job id present, the JobPage is selected.
appController.getProperty().put(AMParams.JOB_ID,"job_01_01");
appController.job();
assertEquals(JobPage.class,appController.getClazz());
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'tasks'. Should set TasksPage class for rendering
 */
@Test public void testTasks(){
appController.tasks();
// The controller must route rendering to the tasks overview page.
Class<?> rendered=appController.getClazz();
assertEquals(TasksPage.class,rendered);
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'taskCounters'. Should print message about error or set CountersPage class for rendering
 */
@Test public void testGetTaskCounters(){
final String accessDenied="Access denied: User user does not have permission to view job job_01_01";
// Without view permission the controller writes a plain-text denial.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.taskCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(accessDenied,appController.getData());
// With permission but no task id, an error message is appended.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.taskCounters();
assertEquals(accessDenied + "missing task ID",appController.getData());
// With permission and a task id present, the CountersPage is selected.
appController.getProperty().put(AMParams.TASK_ID,"task_01_01_m01_01");
appController.taskCounters();
assertEquals(CountersPage.class,appController.getClazz());
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'conf'. Should set JobConfPage class for rendering
 */
@Test public void testConfiguration(){
appController.conf();
// The job configuration page must be selected for rendering.
Class<?> rendered=appController.getClazz();
assertEquals(JobConfPage.class,rendered);
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'attempts'. Should set AttemptsPage class for rendering or print information about error
 */
@Test public void testAttempts(){
appController.getProperty().remove(AMParams.TASK_TYPE);
// Without view permission the controller writes a plain-text denial.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.attempts();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// With permission but no task id, the data is unchanged (no new message).
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.attempts();
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// Task id present but no task type: bad-request title is set.
appController.getProperty().put(AMParams.TASK_ID,"task_01_01_m01_01");
appController.attempts();
assertEquals("Bad request: missing task-type.",appController.getProperty().get("title"));
// Task type present but no attempt state: another bad-request title.
appController.getProperty().put(AMParams.TASK_TYPE,"m");
appController.attempts();
assertEquals("Bad request: missing attempt-state.",appController.getProperty().get("title"));
// All parameters present: the AttemptsPage is selected for rendering.
appController.getProperty().put(AMParams.ATTEMPT_STATE,"State");
appController.attempts();
assertEquals(AttemptsPage.class,appController.getClazz());
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'task'. Should set TaskPage class for rendering and information for title
 */
@Test public void testTask(){
appController.task();
// Title carries the requested task id; TaskPage is chosen for rendering.
Object title=appController.getProperty().get("title");
assertEquals("Attempts for task_01_01_m01_01",title);
assertEquals(TaskPage.class,appController.getClazz());
}
InternalCallVerifierEqualityVerifier
/**
 * Test method 'jobCounters'. Should print message about error or set CountersPage class for rendering
 */
@Test public void testGetJobCounters(){
final String accessDenied="Access denied: User user does not have permission to view job job_01_01";
// Without view permission the controller writes a plain-text denial.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.jobCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(accessDenied,appController.getData());
// With permission but no job id, a bad-request message is appended.
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.JOB_ID);
appController.jobCounters();
assertEquals(accessDenied + "Bad Request: Missing job ID",appController.getData());
// With permission and a job id present, the CountersPage is selected.
appController.getProperty().put(AMParams.JOB_ID,"job_01_01");
appController.jobCounters();
assertEquals(CountersPage.class,appController.getClazz());
}
InternalCallVerifierEqualityVerifier
/**
 * Verify that all the events are flushed on stopping the HistoryHandler:
 * after the app stops, a fresh JobHistory must be able to parse the job
 * with its tasks and final SUCCEEDED state from the flushed history file.
 * @throws Exception
 */
@Test public void testEventsFlushOnStop() throws Exception {
Configuration conf=new Configuration();
MRApp app=new MRAppWithSpecialHistoryHandler(1,0,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job,JobState.SUCCEEDED);
// Wait for the AM (and thus the history handler) to fully stop.
app.waitForState(Service.STATE.STOPPED);
HistoryContext context=new JobHistory();
((JobHistory)context).init(conf);
Job parsedJob=context.getJob(jobId);
Assert.assertEquals("CompletedMaps not correct",1,parsedJob.getCompletedMaps());
// Wildcard generics restored (was a raw Map, which made values() yield
// Object and broke the verifyTask(Task) call below).
Map<?, Task> tasks=parsedJob.getTasks();
Assert.assertEquals("No of tasks not correct",1,tasks.size());
verifyTask(tasks.values().iterator().next());
Map<?, Task> maps=parsedJob.getTasks(TaskType.MAP);
Assert.assertEquals("No of maps not correct",1,maps.size());
Assert.assertEquals("Job state not currect",JobState.SUCCEEDED,parsedJob.getState());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Runs a 2-map/1-reduce job to completion, then replays the written job
// history through a fresh JobHistory service and verifies the parsed job:
// user, task counts per type, completion counters and final state.
@Test public void testHistoryEvents() throws Exception {
Configuration conf=new Configuration();
MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true);
app.submit(conf);
Job job=app.getContext().getAllJobs().values().iterator().next();
JobId jobId=job.getID();
LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
app.waitForState(job,JobState.SUCCEEDED);
app.waitForState(Service.STATE.STOPPED);
HistoryContext context=new JobHistory();
((JobHistory)context).init(conf);
((JobHistory)context).start();
Assert.assertTrue(context.getStartTime() > 0);
Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STARTED);
Job parsedJob=context.getJob(jobId);
// The parsed job must remain usable after the history service stops.
((JobHistory)context).stop();
Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STOPPED);
Assert.assertEquals("CompletedMaps not correct",2,parsedJob.getCompletedMaps());
Assert.assertEquals(System.getProperty("user.name"),parsedJob.getUserName());
// Wildcard generics restored (was a raw Map, which made the typed
// for-each over Task values below uncompilable).
Map<?, Task> tasks=parsedJob.getTasks();
Assert.assertEquals("No of tasks not correct",3,tasks.size());
for ( Task task : tasks.values()) {
verifyTask(task);
}
Map<?, Task> maps=parsedJob.getTasks(TaskType.MAP);
Assert.assertEquals("No of maps not correct",2,maps.size());
Map<?, Task> reduces=parsedJob.getTasks(TaskType.REDUCE);
Assert.assertEquals("No of reduces not correct",1,reduces.size());
Assert.assertEquals("CompletedReduce not correct",1,parsedJob.getCompletedReduces());
Assert.assertEquals("Job state not currect",JobState.SUCCEEDED,parsedJob.getState());
}
InternalCallVerifierEqualityVerifier
// -getGroups for the current user must succeed with exit code 0.
@Test public void testGetGroups() throws Exception {
String user=UserGroupInformation.getCurrentUser().getUserName();
String[] args={"-getGroups",user};
int exitCode=hsAdminClient.run(args);
assertEquals("Exit code should be 0 but was: " + exitCode,0,exitCode);
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Verifies -refreshUserToGroupsMappings: before the refresh two lookups
// return the same (cached) groups; after the refresh the mapping service
// must produce different groups for the same user.
@Test public void testRefreshUserToGroupsMappings() throws Exception {
String[] args=new String[]{"-refreshUserToGroupsMappings"};
Groups groups=Groups.getUserToGroupsMappingService(conf);
String user=UserGroupInformation.getCurrentUser().getUserName();
System.out.println("first attempt:");
// Generics restored (was a raw List): getGroups returns group names.
List<String> g1=groups.getGroups(user);
String[] str_groups=new String[g1.size()];
g1.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
System.out.println("second attempt, should be same:");
List<String> g2=groups.getGroups(user);
g2.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
for (int i=0; i < g2.size(); i++) {
assertEquals("Should be same group ",g1.get(i),g2.get(i));
}
hsAdminClient.run(args);
System.out.println("third attempt(after refresh command), should be different:");
List<String> g3=groups.getGroups(user);
g3.toArray(str_groups);
System.out.println(Arrays.toString(str_groups));
// NOTE(review): indexes g1 by g3's size — assumes the refreshed list is
// no longer than the original; confirm against the test's mock mapping.
for (int i=0; i < g3.size(); i++) {
assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
// finishedTimeBegin=now should match all of the mock history jobs.
@Test public void testJobsQueryFinishTimeBegin() throws JSONException, Exception {
Long now=System.currentTimeMillis();
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs")
    .queryParam("finishedTimeBegin",String.valueOf(now))
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONArray arr=json.getJSONObject("jobs").getJSONArray("job");
assertEquals("incorrect number of elements",3,arr.length());
}
InternalCallVerifierEqualityVerifier
// An unknown user matches no jobs; the "jobs" field must be JSON null.
@Test public void testJobsQueryUserNone() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("user","bogus")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
assertEquals("jobs is not null",JSONObject.NULL,json.get("jobs"));
}
InternalCallVerifierEqualityVerifier
// A negative finishedTimeEnd is rejected with 400 and a BadRequestException.
@Test public void testJobsQueryFinishTimeEndNegative() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs")
    .queryParam("finishedTimeEnd",String.valueOf(-1000))
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject exception=response.getEntity(JSONObject.class).getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
WebServicesTestUtils.checkStringMatch("exception message",
    "java.lang.Exception: finishedTimeEnd must be greater than 0",
    exception.getString("message"));
WebServicesTestUtils.checkStringMatch("exception type",
    "BadRequestException",exception.getString("exception"));
WebServicesTestUtils.checkStringMatch("exception classname",
    "org.apache.hadoop.yarn.webapp.BadRequestException",
    exception.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
// An unknown queue matches no jobs; the "jobs" field must be JSON null.
@Test public void testJobsQueryQueueNonExist() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("queue","bogus")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
assertEquals("jobs is not null",JSONObject.NULL,json.get("jobs"));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Queries with a [finishedTimeBegin, finishedTimeEnd] window that excludes
// only the latest-finishing job, expecting size-1 results.
@Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception {
WebResource r=resource();
// Generics restored (was raw Map/ArrayList, which made the long
// assignment and getReport() calls below uncompilable without casts).
Map<JobId, Job> jobsMap=appContext.getAllJobs();
int size=jobsMap.size();
ArrayList<Long> finishTime=new ArrayList<Long>(size);
for ( Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
finishTime.add(entry.getValue().getReport().getFinishTime());
}
Collections.sort(finishTime);
assertTrue("Error we must have atleast 3 jobs",size >= 3);
// Second-largest finish time: the window keeps all but the last job.
long midFinishTime=finishTime.get(size - 2);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("finishedTimeBegin",String.valueOf(40000)).queryParam("finishedTimeEnd",String.valueOf(midFinishTime)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject jobs=json.getJSONObject("jobs");
JSONArray arr=jobs.getJSONArray("job");
assertEquals("incorrect number of elements",size - 1,arr.length());
}
InternalCallVerifierEqualityVerifier
// A non-numeric finishedTimeEnd is rejected with 400 and a BadRequestException.
@Test public void testJobsQueryFinishTimeEndInvalidformat() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("finishedTimeEnd","efsd")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject exception=response.getEntity(JSONObject.class).getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
WebServicesTestUtils.checkStringMatch("exception message",
    "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
    exception.getString("message"));
WebServicesTestUtils.checkStringMatch("exception type",
    "BadRequestException",exception.getString("exception"));
WebServicesTestUtils.checkStringMatch("exception classname",
    "org.apache.hadoop.yarn.webapp.BadRequestException",
    exception.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
// A non-numeric finishedTimeBegin is rejected with 400 and a BadRequestException.
@Test public void testJobsQueryFinishTimeInvalidformat() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("finishedTimeBegin","efsd")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject exception=response.getEntity(JSONObject.class).getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
WebServicesTestUtils.checkStringMatch("exception message",
    "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
    exception.getString("message"));
WebServicesTestUtils.checkStringMatch("exception type",
    "BadRequestException",exception.getString("exception"));
WebServicesTestUtils.checkStringMatch("exception classname",
    "org.apache.hadoop.yarn.webapp.BadRequestException",
    exception.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
// A non-numeric startedTimeBegin is rejected with 400 and a BadRequestException.
@Test public void testJobsQueryStartTimeInvalidformat() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("startedTimeBegin","efsd")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject exception=response.getEntity(JSONObject.class).getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
WebServicesTestUtils.checkStringMatch("exception message",
    "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
    exception.getString("message"));
WebServicesTestUtils.checkStringMatch("exception type",
    "BadRequestException",exception.getString("exception"));
WebServicesTestUtils.checkStringMatch("exception classname",
    "org.apache.hadoop.yarn.webapp.BadRequestException",
    exception.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
// A non-numeric startedTimeEnd is rejected with 400 and a BadRequestException.
@Test public void testJobsQueryStartTimeEndInvalidformat() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("startedTimeEnd","efsd")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject exception=response.getEntity(JSONObject.class).getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
WebServicesTestUtils.checkStringMatch("exception message",
    "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
    exception.getString("message"));
WebServicesTestUtils.checkStringMatch("exception type",
    "BadRequestException",exception.getString("exception"));
WebServicesTestUtils.checkStringMatch("exception classname",
    "org.apache.hadoop.yarn.webapp.BadRequestException",
    exception.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
// limit=2 must cap the returned job array at two entries.
@Test public void testJobsQueryLimit() throws JSONException, Exception {
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs").queryParam("limit","2")
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONArray arr=json.getJSONObject("jobs").getJSONArray("job");
assertEquals("incorrect number of elements",2,arr.length());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
// startedTimeEnd=now should match all of the mock history jobs.
@Test public void testJobsQueryStartTimeEnd() throws JSONException, Exception {
Long now=System.currentTimeMillis();
ClientResponse response=resource().path("ws").path("v1").path("history")
    .path("mapreduce").path("jobs")
    .queryParam("startedTimeEnd",String.valueOf(now))
    .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONArray arr=json.getJSONObject("jobs").getJSONArray("job");
assertEquals("incorrect number of elements",3,arr.length());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Queries with a [startedTimeBegin, startedTimeEnd] window that excludes
// only the latest-starting job, expecting size-1 results.
@Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception {
WebResource r=resource();
// Generics restored (was raw Map/ArrayList, which made the long
// assignment and getReport() calls below uncompilable without casts).
Map<JobId, Job> jobsMap=appContext.getAllJobs();
int size=jobsMap.size();
ArrayList<Long> startTime=new ArrayList<Long>(size);
for ( Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
startTime.add(entry.getValue().getReport().getStartTime());
}
Collections.sort(startTime);
assertTrue("Error we must have atleast 3 jobs",size >= 3);
// Second-largest start time: the window keeps all but the last job.
long midStartTime=startTime.get(size - 2);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("startedTimeBegin",String.valueOf(40000)).queryParam("startedTimeEnd",String.valueOf(midStartTime)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject jobs=json.getJSONObject("jobs");
JSONArray arr=jobs.getJSONArray("job");
assertEquals("incorrect number of elements",size - 1,arr.length());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/** finishedTimeBegin later than finishedTimeEnd is an invalid window and must yield 400. */
@Test public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException, Exception {
  WebResource web=resource();
  Long begin=System.currentTimeMillis();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin",String.valueOf(begin))
      .queryParam("finishedTimeEnd",String.valueOf(40000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject remoteException=resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements",3,remoteException.length());
  WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: finishedTimeEnd must be greater than finishedTimeBegin",remoteException.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",remoteException.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",remoteException.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
/** A negative startedTimeEnd must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryStartTimeEndNegative() throws JSONException, Exception {
  WebResource web=resource();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("startedTimeEnd",String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject remoteException=resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements",3,remoteException.length());
  WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: startedTimeEnd must be greater than 0",remoteException.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",remoteException.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",remoteException.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
/**
 * Query jobs by user "mock"; all 3 mock jobs belong to that user, and the
 * first entry must match the partial-job view from the app context.
 * Fix: removed a leftover debug System.out.println of the response body.
 */
@Test public void testJobsQueryUser() throws JSONException, Exception {
  WebResource r=resource();
  ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("user","mock").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  JSONObject jobs=json.getJSONObject("jobs");
  JSONArray arr=jobs.getJSONArray("job");
  assertEquals("incorrect number of elements",3,arr.length());
  // Spot-check the first returned job against the server-side partial job.
  JSONObject info=arr.getJSONObject(0);
  Job job=appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
  VerifyJobsUtils.verifyHsJobPartial(info,job);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Query jobs filtered by the state of the first mock job; exactly that job
 * must be returned and match its partial-job view.
 * Raw Map restored to Map&lt;JobId,Job&gt; so getID()/getState() compile.
 */
@Test public void testJobsQueryState() throws JSONException, Exception {
  WebResource r=resource();
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  // Grab the id and state of an arbitrary (first) job to query for.
  String queryState="BOGUS";
  JobId jid=null;
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    jid=entry.getValue().getID();
    queryState=entry.getValue().getState().toString();
    break;
  }
  ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("state",queryState).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  JSONObject jobs=json.getJSONObject("jobs");
  JSONArray arr=jobs.getJSONArray("job");
  assertEquals("incorrect number of elements",1,arr.length());
  JSONObject info=arr.getJSONObject(0);
  Job job=appContext.getPartialJob(jid);
  VerifyJobsUtils.verifyHsJobPartial(info,job);
}
InternalCallVerifierEqualityVerifier
/** A negative finishedTimeBegin must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryFinishTimeBeginNegative() throws JSONException, Exception {
  WebResource web=resource();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin",String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject remoteException=resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements",3,remoteException.length());
  WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: finishedTimeBegin must be greater than 0",remoteException.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",remoteException.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",remoteException.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
/**
 * limit=-1 must be rejected with 400 BAD_REQUEST.
 * NOTE: the expected message keeps the server's literal wording
 * ("greater then") -- it must match the service byte-for-byte.
 */
@Test public void testJobsQueryLimitInvalid() throws JSONException, Exception {
  WebResource web=resource();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("limit","-1")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject remoteException=resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements",3,remoteException.length());
  WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: limit value must be greater then 0",remoteException.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",remoteException.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",remoteException.getString("javaClassName"));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/** startedTimeBegin=now excludes every mock job, so "jobs" must be JSON null. */
@Test public void testJobsQueryStartTimeBegin() throws JSONException, Exception {
  WebResource web=resource();
  Long threshold=System.currentTimeMillis();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin",String.valueOf(threshold))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject body=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  assertEquals("jobs is not null",JSONObject.NULL,body.get("jobs"));
}
InternalCallVerifierEqualityVerifier
/** A negative startedTimeBegin must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryStartTimeNegative() throws JSONException, Exception {
  WebResource web=resource();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin",String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject remoteException=resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements",3,remoteException.length());
  WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: startedTimeBegin must be greater than 0",remoteException.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",remoteException.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",remoteException.getString("javaClassName"));
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Query by a JobState no mock job is in; the "jobs" entry must be JSON null.
 * Raw collections restored to ArrayList&lt;JobState&gt; / Map&lt;JobId,Job&gt;
 * so the JobState element access compiles.
 */
@Test public void testJobsQueryStateNone() throws JSONException, Exception {
  WebResource r=resource();
  ArrayList<JobState> JOB_STATES=new ArrayList<JobState>(Arrays.asList(JobState.values()));
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  // Remove every state that is actually in use; what's left is unused.
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    JOB_STATES.remove(entry.getValue().getState());
  }
  assertTrue("No unused job states",JOB_STATES.size() > 0);
  JobState notInUse=JOB_STATES.get(0);
  ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").queryParam("state",notInUse.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject json=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,json.length());
  assertEquals("jobs is not null",JSONObject.NULL,json.get("jobs"));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/** finishedTimeEnd=now matches no mock job, so "jobs" must be JSON null. */
@Test public void testJobsQueryFinishTimeEnd() throws JSONException, Exception {
  WebResource web=resource();
  Long cutoff=System.currentTimeMillis();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("finishedTimeEnd",String.valueOf(cutoff))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject body=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  assertEquals("jobs is not null",JSONObject.NULL,body.get("jobs"));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/** startedTimeBegin later than startedTimeEnd is an invalid window and must yield 400. */
@Test public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException, Exception {
  WebResource web=resource();
  Long begin=System.currentTimeMillis();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin",String.valueOf(begin))
      .queryParam("startedTimeEnd",String.valueOf(40000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST,resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject remoteException=resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements",3,remoteException.length());
  WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: startedTimeEnd must be greater than startTimeBegin",remoteException.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",remoteException.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",remoteException.getString("javaClassName"));
}
InternalCallVerifierEqualityVerifier
/** Querying by queue "mockqueue" must return all 3 mock jobs. */
@Test public void testJobsQueryQueue() throws JSONException, Exception {
  WebResource web=resource();
  ClientResponse resp=web.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
      .queryParam("queue","mockqueue")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject body=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  JSONArray jobArray=body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements",3,jobArray.length());
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/** An old-format history file name (no queue/start-time fields) must still parse correctly. */
@Test public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
  JobID oldJobId=JobID.forName(JOB_ID);
  JobId expectedJobId=TypeConverter.toYarn(oldJobId);
  long expectedSubmitTime=Long.parseLong(SUBMIT_TIME);
  long expectedFinishTime=Long.parseLong(FINISH_TIME);
  int expectedNumMaps=Integer.parseInt(NUM_MAPS);
  int expectedNumReduces=Integer.parseInt(NUM_REDUCES);
  // Build a file name in the legacy format and decode it.
  String oldFormatName=String.format(OLD_JOB_HISTORY_FILE_FORMATTER,JOB_ID,SUBMIT_TIME,USER_NAME,JOB_NAME,FINISH_TIME,NUM_MAPS,NUM_REDUCES,JOB_STATUS);
  JobIndexInfo parsed=FileNameIndexUtils.getIndexInfo(oldFormatName);
  Assert.assertEquals("Job id incorrect after decoding old history file",expectedJobId,parsed.getJobId());
  Assert.assertEquals("Submit time incorrect after decoding old history file",expectedSubmitTime,parsed.getSubmitTime());
  Assert.assertEquals("User incorrect after decoding old history file",USER_NAME,parsed.getUser());
  Assert.assertEquals("Job name incorrect after decoding old history file",JOB_NAME,parsed.getJobName());
  Assert.assertEquals("Finish time incorrect after decoding old history file",expectedFinishTime,parsed.getFinishTime());
  Assert.assertEquals("Num maps incorrect after decoding old history file",expectedNumMaps,parsed.getNumMaps());
  Assert.assertEquals("Num reduces incorrect after decoding old history file",expectedNumReduces,parsed.getNumReduces());
  Assert.assertEquals("Job status incorrect after decoding old history file",JOB_STATUS,parsed.getJobStatus());
  // Legacy names carry no queue field, so the decoded queue must be null.
  Assert.assertNull("Queue name incorrect after decoding old history file",parsed.getQueueName());
}
APIUtilityVerifierEqualityVerifier
/** A %-escaped delimiter in the user-name field must decode back to the raw user name. */
@Test public void testUserNamePercentDecoding() throws IOException {
  String encodedName=String.format(JOB_HISTORY_FILE_FORMATTER,JOB_ID,SUBMIT_TIME,USER_NAME_WITH_DELIMITER_ESCAPE,JOB_NAME,FINISH_TIME,NUM_MAPS,NUM_REDUCES,JOB_STATUS,QUEUE_NAME,JOB_START_TIME);
  JobIndexInfo decoded=FileNameIndexUtils.getIndexInfo(encodedName);
  Assert.assertEquals("User name doesn't match",USER_NAME_WITH_DELIMITER,decoded.getUser());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/** Encoding a JobIndexInfo into a done-file name and decoding it back must be lossless. */
@Test public void testEncodingDecodingEquivalence() throws IOException {
  // Populate every field of the source index info.
  JobIndexInfo source=new JobIndexInfo();
  JobID oldJobId=JobID.forName(JOB_ID);
  source.setJobId(TypeConverter.toYarn(oldJobId));
  source.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  source.setUser(USER_NAME);
  source.setJobName(JOB_NAME);
  source.setFinishTime(Long.parseLong(FINISH_TIME));
  source.setNumMaps(Integer.parseInt(NUM_MAPS));
  source.setNumReduces(Integer.parseInt(NUM_REDUCES));
  source.setJobStatus(JOB_STATUS);
  source.setQueueName(QUEUE_NAME);
  source.setJobStartTime(Long.parseLong(JOB_START_TIME));
  // Round-trip through the file-name encoding.
  String encoded=FileNameIndexUtils.getDoneFileName(source);
  JobIndexInfo roundTripped=FileNameIndexUtils.getIndexInfo(encoded);
  Assert.assertEquals("Job id different after encoding and decoding",source.getJobId(),roundTripped.getJobId());
  Assert.assertEquals("Submit time different after encoding and decoding",source.getSubmitTime(),roundTripped.getSubmitTime());
  Assert.assertEquals("User different after encoding and decoding",source.getUser(),roundTripped.getUser());
  Assert.assertEquals("Job name different after encoding and decoding",source.getJobName(),roundTripped.getJobName());
  Assert.assertEquals("Finish time different after encoding and decoding",source.getFinishTime(),roundTripped.getFinishTime());
  Assert.assertEquals("Num maps different after encoding and decoding",source.getNumMaps(),roundTripped.getNumMaps());
  Assert.assertEquals("Num reduces different after encoding and decoding",source.getNumReduces(),roundTripped.getNumReduces());
  Assert.assertEquals("Job status different after encoding and decoding",source.getJobStatus(),roundTripped.getJobStatus());
  Assert.assertEquals("Queue name different after encoding and decoding",source.getQueueName(),roundTripped.getQueueName());
  Assert.assertEquals("Job start time different after encoding and decoding",source.getJobStartTime(),roundTripped.getJobStartTime());
}
APIUtilityVerifierEqualityVerifier
/** A %-escaped delimiter in the job-name field must decode back to the raw job name. */
@Test public void testJobNamePercentDecoding() throws IOException {
  String encodedName=String.format(JOB_HISTORY_FILE_FORMATTER,JOB_ID,SUBMIT_TIME,USER_NAME,JOB_NAME_WITH_DELIMITER_ESCAPE,FINISH_TIME,NUM_MAPS,NUM_REDUCES,JOB_STATUS,QUEUE_NAME,JOB_START_TIME);
  JobIndexInfo decoded=FileNameIndexUtils.getIndexInfo(encodedName);
  Assert.assertEquals("Job name doesn't match",JOB_NAME_WITH_DELIMITER,decoded.getJobName());
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/** MRApps.getJobFile must build the per-user staging path for job.xml. */
@Test(timeout=120000) public void testGetJobFileWithUser(){
  Configuration configuration=new Configuration();
  configuration.set(MRJobConfig.MR_AM_STAGING_DIR,"/my/path/to/staging");
  final String expectedPath="/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml";
  String jobFile=MRApps.getJobFile(configuration,"dummy-user",new JobID("dummy-job",12345));
  assertNotNull("getJobFile results in null.",jobFile);
  assertEquals("jobFile with specified user is not as expected.",expectedPath,jobFile);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * With the job classloader enabled, job.jar must land on the app classpath
 * (APP_CLASSPATH) and stay off the base CLASSPATH.
 * Raw Map restored to Map&lt;String,String&gt; so env.get(...) -> String compiles.
 */
@Test(timeout=120000) public void testSetClasspathWithJobClassloader() throws IOException {
  Configuration conf=new Configuration();
  conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
  conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER,true);
  Map<String,String> env=new HashMap<String,String>();
  MRApps.setClasspath(env,conf);
  String cp=env.get("CLASSPATH");
  String appCp=env.get("APP_CLASSPATH");
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the" + " classpath!",cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
  assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",cp.contains("PWD"));
  String expectedAppClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),"job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*"));
  assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app" + " classpath!",expectedAppClasspath,appCp);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the metrics system against a sink whose consumer hangs: the
 * synchronous publish must be dropped (droppedPubAll == 1) rather than
 * block forever, and stop()/shutdown() must interrupt the hung sink so it
 * still gets later records. NOTE(review): the statement order below IS the
 * test's synchronization -- do not reorder calls.
 */
@Test public void testHangingSink(){
// Retry delay/backoff/count are tuned so a hanging sink is given up on immediately.
new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.sink.hanging.retry.delay","1").add("test.sink.hanging.retry.backoff","1.01").add("test.sink.hanging.retry.count","0").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
// Register one source and bump a counter so the sink has a record to consume.
TestSource s=ms.register("s3","s3 desc",new TestSource("s3rec"));
s.c1.incr();
HangingSink hanging=new HangingSink();
ms.registerSink("hanging","Hang the sink!",hanging);
// Publish synchronously: with the sink hung, the record must be dropped.
ms.publishMetricsNow();
assertEquals(1L,ms.droppedPubAll.value());
// The sink thread must not have been interrupted yet -- only shutdown does that.
assertFalse(hanging.getInterrupted());
ms.stop();
ms.shutdown();
assertTrue(hanging.getInterrupted());
assertTrue("The sink didn't get called after its first hang " + "for subsequent records.",hanging.getGotCalledSecondTime());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test concurrent consumer access, which is illegal: while a sleeping
 * consumer thread (started by newSleepingConsumerQueue) holds the queue,
 * every consumer-side operation from this thread must throw
 * ConcurrentModificationException, and the queue contents must be intact
 * afterwards. NOTE(review): statement order is load-bearing -- the enqueue
 * of 2 and the dropped enqueue of 3 happen while the consumer sleeps.
 * @throws Exception
 */
@Test public void testConcurrentConsumers() throws Exception {
// Queue of capacity 2 whose consumer thread is asleep holding element 1.
final SinkQueue q=newSleepingConsumerQueue(2,1);
assertTrue("should enqueue",q.enqueue(2));
assertEquals("queue back",2,(int)q.back());
// Queue is now full (capacity 2): a further enqueue must be dropped.
assertTrue("should drop",!q.enqueue(3));
// Each consumer-side call below must fail with CME while the sleeping
// consumer owns the queue.
shouldThrowCME(new Fun(){
@Override public void run(){
q.clear();
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consume(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consumeAll(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.dequeue();
}
}
);
// Nothing above may have mutated the queue: both elements still present.
assertEquals("queue size",2,q.size());
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
InternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test the consumer throwing exceptions: the very same exception object
 * must propagate out of consume(), and the element whose consumption
 * failed must remain in the queue.
 * Raw SinkQueue/Consumer restored to their generic forms so the anonymous
 * consumer's {@code @Override} of consume(Integer) compiles.
 * @throws Exception
 */
@Test public void testConsumerException() throws Exception {
  final SinkQueue<Integer> q=new SinkQueue<Integer>(1);
  final RuntimeException ex=new RuntimeException("expected");
  q.enqueue(1);
  try {
    q.consume(new Consumer<Integer>(){
      @Override public void consume( Integer e){
        throw ex;
      }
    }
    );
  }
  catch ( Exception expected) {
    // Must be the identical exception instance, not a wrapper.
    assertSame("consumer exception",ex,expected);
  }
  // A failed consume must not dequeue the element.
  assertEquals("queue size",1,q.size());
  assertEquals("element",1,(int)q.dequeue());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test common use case: enqueue/front/back/dequeue/consume round trips on
 * a small queue, ending empty with null front/back.
 * Raw SinkQueue/Consumer restored to their generic forms so the anonymous
 * consumer's {@code @Override} of consume(Integer) compiles.
 * @throws Exception
 */
@Test public void testCommon() throws Exception {
  final SinkQueue<Integer> q=new SinkQueue<Integer>(2);
  q.enqueue(1);
  assertEquals("queue front",1,(int)q.front());
  assertEquals("queue back",1,(int)q.back());
  assertEquals("element",1,(int)q.dequeue());
  assertTrue("should enqueue",q.enqueue(2));
  q.consume(new Consumer<Integer>(){
    @Override public void consume( Integer e){
      assertEquals("element",2,(int)e);
    }
  }
  );
  assertTrue("should enqueue",q.enqueue(3));
  assertEquals("element",3,(int)q.dequeue());
  // Queue drained: size 0 and both ends report null.
  assertEquals("queue size",0,q.size());
  assertEquals("queue front",null,q.front());
  assertEquals("queue back",null,q.back());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test nonblocking enqueue when queue is full: the extra element is
 * dropped, and the queue keeps working after a dequeue frees space.
 * Raw SinkQueue/Consumer restored to their generic forms so the anonymous
 * consumer's {@code @Override} of consume(Integer) compiles.
 * @throws Exception
 */
@Test public void testFull() throws Exception {
  final SinkQueue<Integer> q=new SinkQueue<Integer>(1);
  q.enqueue(1);
  // Capacity 1 and already holding an element: this enqueue must be dropped.
  assertTrue("should drop",!q.enqueue(2));
  assertEquals("element",1,(int)q.dequeue());
  q.enqueue(3);
  q.consume(new Consumer<Integer>(){
    @Override public void consume( Integer e){
      assertEquals("element",3,(int)e);
    }
  }
  );
  assertEquals("queue size",0,q.size());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test consumers that take their time: while the sleeping consumer holds
 * the queue (seeded with elements 1 and 2), an enqueue beyond capacity is
 * dropped and the queue contents stay intact.
 * @throws Exception
 */
@Test public void testHangingConsumer() throws Exception {
  SinkQueue queue=newSleepingConsumerQueue(2,1,2);
  assertEquals("queue back",2,(int)queue.back());
  // Queue is at capacity, so this enqueue must be rejected.
  assertTrue("should drop",!queue.enqueue(3));
  assertEquals("queue size",2,queue.size());
  assertEquals("queue head",1,(int)queue.front());
  assertEquals("queue back",2,(int)queue.back());
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the consumeAll method: fill the queue to capacity, then verify
 * every element is delivered to the consumer exactly once and in order.
 * Raw SinkQueue/Consumer restored to their generic forms so the anonymous
 * consumer's {@code @Override} of consume(Integer) compiles.
 * @throws Exception
 */
@Test public void testConsumeAll() throws Exception {
  final int capacity=64;
  final SinkQueue<Integer> q=new SinkQueue<Integer>(capacity);
  for (int i=0; i < capacity; ++i) {
    assertTrue("should enqueue",q.enqueue(i));
  }
  // One past capacity must be rejected.
  assertTrue("should not enqueue",!q.enqueue(capacity));
  final Runnable trigger=mock(Runnable.class);
  q.consumeAll(new Consumer<Integer>(){
    private int expected=0;
    @Override public void consume( Integer e){
      // Elements must arrive in FIFO order.
      assertEquals("element",expected++,(int)e);
      trigger.run();
    }
  }
  );
  // The consumer must have run exactly once per enqueued element.
  verify(trigger,times(capacity)).run();
}
InternalCallVerifierEqualityVerifier
/**
 * Test the clear method: overfilling caps the size at capacity, and
 * clear() empties the queue.
 */
@Test public void testClear(){
  final SinkQueue queue=new SinkQueue(128);
  // Attempt to enqueue well past capacity; extras are simply dropped.
  for (int value=0; value < queue.capacity() + 97; ++value) {
    queue.enqueue(value);
  }
  assertEquals("queue size",queue.capacity(),queue.size());
  queue.clear();
  assertEquals("queue size",0,queue.size());
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Check that counts and quantile estimates are correctly reset after a call
 * to {@link SampleQuantiles#clear()}.
 * Fix: assertEquals arguments put in JUnit (expected, actual) order so a
 * failure reports the right expected value.
 */
@Test public void testClear() throws IOException {
  for (int i=0; i < 1000; i++) {
    estimator.insert(i);
  }
  estimator.clear();
  assertEquals(0,estimator.getCount());
  assertEquals(0,estimator.getSampleCount());
  // With no samples, a snapshot is not available.
  assertNull(estimator.snapshot());
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Check that the counts of the number of items in the window and sample are
 * incremented correctly as items are added.
 * Fix: assertEquals arguments put in JUnit (expected, actual) order so a
 * failure reports the right expected value. Call order is preserved:
 * snapshot() runs before getSampleCount().
 */
@Test public void testCount() throws IOException {
  assertEquals(0,estimator.getCount());
  assertEquals(0,estimator.getSampleCount());
  assertNull(estimator.snapshot());
  estimator.insert(1337);
  assertEquals(1,estimator.getCount());
  estimator.snapshot();
  assertEquals(1,estimator.getSampleCount());
  assertEquals("50.00 %ile +/- 5.00%: 1337\n" + "75.00 %ile +/- 2.50%: 1337\n" + "90.00 %ile +/- 1.00%: 1337\n"+ "95.00 %ile +/- 0.50%: 1337\n"+ "99.00 %ile +/- 0.10%: 1337",estimator.toString());
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * Get the IP addresses of an unknown interface: the lookup must fail with
 * UnknownHostException carrying the interface name in its message.
 */
@Test public void testIPsOfUnknownInterface() throws Exception {
  try {
    DNS.getIPs("name-of-an-unknown-interface");
    fail("Got an IP for a bogus interface");
  }
  catch ( UnknownHostException expected) {
    assertEquals("No such interface name-of-an-unknown-interface",expected.getMessage());
  }
}
APIUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Test that repeated calls to getting the local host are fairly fast, and
 * hence that caching is being used.
 * @throws Exception if hostname lookups fail
 */
@Test public void testGetLocalHostIsFast() throws Exception {
  // First two calls warm the cache.
  String first=DNS.getDefaultHost(DEFAULT);
  assertNotNull(first);
  String second=DNS.getDefaultHost(DEFAULT);
  // Time only the third call; a cached lookup must be far under 20s.
  long start=Time.now();
  String third=DNS.getDefaultHost(DEFAULT);
  long elapsed=Time.now() - start;
  assertEquals(third,second);
  assertEquals(second,first);
  assertTrue("Took too long to determine local host - caching is not working",elapsed < 20000);
}
APIUtilityVerifierEqualityVerifier
/**
 * Test the "default" IP addresses is the local IP addr.
 * Fixes: redundant toString() on String array elements removed, and
 * assertEquals arguments put in JUnit (expected, actual) order.
 */
@Test public void testGetIPWithDefault() throws Exception {
  String[] ips=DNS.getIPs(DEFAULT);
  assertEquals("Should only return 1 default IP",1,ips.length);
  assertEquals(getLocalIPAddr().getHostAddress(),ips[0]);
  String ip=DNS.getDefaultIP(DEFAULT);
  assertEquals(ips[0],ip);
}
Class: org.apache.hadoop.net.TestNetUtils
EqualityVerifier
/** A URI whose authority has no host must pass through getCanonicalUri unchanged. */
@Test public void testCanonicalUriWithNoHost(){
  URI canonical=NetUtils.getCanonicalUri(URI.create("scheme://:123/path"),2);
  assertEquals("scheme://:123/path",canonical.toString());
}
EqualityVerifier
/** URIs without an authority component must pass through getCanonicalUri unchanged. */
@Test public void testCanonicalUriWithNoAuthority(){
  assertEquals("scheme:/",NetUtils.getCanonicalUri(URI.create("scheme:/"),2).toString());
  assertEquals("scheme:/path",NetUtils.getCanonicalUri(URI.create("scheme:/path"),2).toString());
  assertEquals("scheme:///",NetUtils.getCanonicalUri(URI.create("scheme:///"),2).toString());
  assertEquals("scheme:///path",NetUtils.getCanonicalUri(URI.create("scheme:///path"),2).toString());
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * createSocketAddr: an explicit port wins, the default port fills in when
 * absent, and a non-numeric port fails with the config key in the message.
 */
@Test public void testCreateSocketAddress() throws Throwable {
  InetSocketAddress sockAddr=NetUtils.createSocketAddr("127.0.0.1:12345",1000,"myconfig");
  assertEquals("127.0.0.1",sockAddr.getAddress().getHostAddress());
  assertEquals(12345,sockAddr.getPort());
  // No port in the string: the supplied default (1000) applies.
  sockAddr=NetUtils.createSocketAddr("127.0.0.1",1000,"myconfig");
  assertEquals("127.0.0.1",sockAddr.getAddress().getHostAddress());
  assertEquals(1000,sockAddr.getPort());
  try {
    sockAddr=NetUtils.createSocketAddr("127.0.0.1:blahblah",1000,"myconfig");
    fail("Should have failed to parse bad port");
  }
  catch ( IllegalArgumentException iae) {
    assertInException(iae,"myconfig");
  }
}
EqualityVerifier
/** Canonicalization must FQDN-ify the host and append the default port when none is given. */
@Test public void testCanonicalUriWithDefaultPort(){
  assertEquals("scheme://host.a.b:123",NetUtils.getCanonicalUri(URI.create("scheme://host"),123).toString());
  assertEquals("scheme://host.a.b:123/",NetUtils.getCanonicalUri(URI.create("scheme://host/"),123).toString());
  assertEquals("scheme://host.a.b:123/path",NetUtils.getCanonicalUri(URI.create("scheme://host/path"),123).toString());
  // Query and fragment must survive canonicalization.
  assertEquals("scheme://host.a.b:123/path?q#frag",NetUtils.getCanonicalUri(URI.create("scheme://host/path?q#frag"),123).toString());
}
EqualityVerifier
/** With no port and a -1 default, canonicalization FQDN-ifies the host but adds no port. */
@Test public void testCanonicalUriWithNoPortNoDefaultPort(){
  URI canonical=NetUtils.getCanonicalUri(URI.create("scheme://host/path"),-1);
  assertEquals("scheme://host.a.b/path",canonical.toString());
}
EqualityVerifier
/** Bare path URIs (no scheme, no authority) must pass through getCanonicalUri unchanged. */
@Test public void testCanonicalUriWithPath(){
  assertEquals("path",NetUtils.getCanonicalUri(URI.create("path"),2).toString());
  assertEquals("/path",NetUtils.getCanonicalUri(URI.create("/path"),2).toString());
}
EqualityVerifier
/** An explicit port must be preserved by canonicalization and the default (456) ignored. */
@Test public void testCanonicalUriWithPort(){
  assertEquals("scheme://host.a.b:123",NetUtils.getCanonicalUri(URI.create("scheme://host:123"),456).toString());
  assertEquals("scheme://host.a.b:123/",NetUtils.getCanonicalUri(URI.create("scheme://host:123/"),456).toString());
  assertEquals("scheme://host.a.b:123/path",NetUtils.getCanonicalUri(URI.create("scheme://host:123/path"),456).toString());
  // Query and fragment must survive canonicalization.
  assertEquals("scheme://host.a.b:123/path?q#frag",NetUtils.getCanonicalUri(URI.create("scheme://host:123/path?q#frag"),456).toString());
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test for {@link NetUtils#normalizeHostNames}.
 * Fixes: raw List restored to List&lt;String&gt;, and assertEquals arguments
 * put in JUnit (expected, actual) order.
 */
@Test public void testNormalizeHostName(){
  List<String> hosts=Arrays.asList("127.0.0.1","localhost","1.kanyezone.appspot.com","UnknownHost123");
  List<String> normalizedHosts=NetUtils.normalizeHostNames(hosts);
  // An IP address passes through unchanged.
  assertEquals(hosts.get(0),normalizedHosts.get(0));
  // "localhost" is rewritten to its loopback IP.
  assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
  assertEquals(hosts.get(0),normalizedHosts.get(1));
  // A resolvable name is rewritten; an unknown host is kept as-is.
  assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
  assertEquals(hosts.get(3),normalizedHosts.get(3));
}
Class: org.apache.hadoop.net.TestNetworkTopology
EqualityVerifier
/**
 * The topology's leaf count must equal the number of registered datanodes.
 * Fix: assertEquals arguments put in JUnit (expected, actual) order.
 */
@Test public void testNumOfChildren() throws Exception {
  assertEquals(dataNodes.length,cluster.getNumOfLeaves());
}
IterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/** Removing every datanode empties the topology; the nodes are re-added for later tests. */
@Test public void testRemove() throws Exception {
  for (int idx=0; idx < dataNodes.length; idx++) {
    cluster.remove(dataNodes[idx]);
  }
  // After removal no node may be found and the leaf count must be zero.
  for (int idx=0; idx < dataNodes.length; idx++) {
    assertFalse(cluster.contains(dataNodes[idx]));
  }
  assertEquals(0,cluster.getNumOfLeaves());
  // Restore the shared fixture for subsequent tests.
  for (int idx=0; idx < dataNodes.length; idx++) {
    cluster.add(dataNodes[idx]);
  }
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
// Regression test: a datanode registering with an invalid network location
// (rack depth mismatch) must be rejected by the NN, and the bad topology
// must NOT be cached -- after the rack mapping is corrected and the DN
// restarted, it must register successfully at the same depth as the valid
// node. NOTE(review): the polling loops below are the synchronization with
// the NN's asynchronous registration handling; do not restructure.
@Test(timeout=180000) public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
// Two DNs on racks of different depth ("/a/b" vs "/c") -- an invalid topology.
String racks[]={"/a/b","/c"};
String hosts[]={"foo1.example.com","foo2.example.com"};
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build();
cluster.waitActive();
NamenodeProtocols nn=cluster.getNameNodeRpc();
Assert.assertNotNull(nn);
// Poll until exactly one DN is live: the invalid-depth DN must never register.
DatanodeInfo[] info;
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertFalse(info.length == 2);
if (info.length == 1) {
break;
}
Thread.sleep(1000);
}
// Figure out which DN was rejected and remap it onto the valid rack.
int validIdx=info[0].getHostName().equals(hosts[0]) ? 0 : 1;
int invalidIdx=validIdx == 1 ? 0 : 1;
StaticMapping.addNodeToRack(hosts[invalidIdx],racks[validIdx]);
LOG.info("datanode " + validIdx + " came up with network location "+ info[0].getNetworkLocation());
// Restart the previously rejected DN; with the corrected mapping it must join.
cluster.restartDataNode(invalidIdx);
Thread.sleep(5000);
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
if (info.length == 2) {
break;
}
if (info.length == 0) {
LOG.info("got no valid DNs");
}
else if (info.length == 1) {
LOG.info("got one valid DN: " + info[0].getHostName() + " (at "+ info[0].getNetworkLocation()+ ")");
}
Thread.sleep(1000);
}
// Both DNs now live at the same (valid) network location depth.
Assert.assertEquals(info[0].getNetworkLocation(),info[1].getNetworkLocation());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifierIterativeVerifierBranchVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This test checks that chooseRandom works for an excluded rack: nodes on
 * /d2 are never picked, every other node is picked at least once.
 * Raw Map restored to Map&lt;Node,Integer&gt; so the int unboxing compiles.
 */
@Test public void testChooseRandomExcludedRack(){
  Map<Node,Integer> frequency=pickNodesAtRandom(100,"~" + "/d2");
  for (int j=0; j < dataNodes.length; j++) {
    int freq=frequency.get(dataNodes[j]);
    if (dataNodes[j].getNetworkLocation().startsWith("/d2")) {
      // Excluded rack: must never be chosen.
      assertEquals(0,freq);
    }
    else {
      // Non-excluded nodes should all show up over 100 picks.
      assertTrue(freq > 0);
    }
  }
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test setting some server options: the receive-buffer attribute must
 * round-trip, and a receive timeout must make accept() fail with
 * SocketTimeoutException.
 * @throws IOException
 */
@Test(timeout=180000) public void testServerOptions() throws Exception {
  final String TEST_PATH=new File(sockDir.getDir(),"test_sock_server_options").getAbsolutePath();
  DomainSocket server=DomainSocket.bindAndListen(TEST_PATH);
  try {
    // Halve the receive buffer and confirm the attribute round-trips.
    int originalBufSize=server.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
    int halvedBufSize=originalBufSize / 2;
    server.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE,halvedBufSize);
    Assert.assertEquals(halvedBufSize,server.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE));
    // Set a 1-second receive timeout and confirm it round-trips too.
    int timeoutMillis=1000;
    server.setAttribute(DomainSocket.RECEIVE_TIMEOUT,timeoutMillis);
    Assert.assertEquals(timeoutMillis,server.getAttribute(DomainSocket.RECEIVE_TIMEOUT));
    // With nobody connecting, accept() must time out rather than block.
    try {
      server.accept();
      Assert.fail("expected the accept() to time out and fail");
    }
    catch ( SocketTimeoutException e) {
      GenericTestUtils.assertExceptionContains("accept(2) error: ",e);
    }
  }
  finally {
    server.close();
    Assert.assertFalse(server.isOpen());
  }
}
EqualityVerifier
/**
 * Test DomainSocket path setting and getting.
 *
 * The "_PORT" placeholder in the template must be substituted with the
 * supplied port number.
 * @throws IOException on failure
 */
@Test(timeout=180000) public void testSocketPathSetGet() throws IOException {
  final String template="/var/run/hdfs/sock._PORT";
  final String effectivePath=DomainSocket.getEffectivePath(template,100);
  Assert.assertEquals("/var/run/hdfs/sock.100",effectivePath);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Test that we get a read result of -1 on EOF.
 *
 * A server-side thread accepts one connection and expects its first read to
 * return -1, because the client connects and closes without writing.
 * @throws Exception if the server thread fails or does not finish in time
 */
@Test(timeout=180000) public void testSocketReadEof() throws Exception {
  final String TEST_PATH=new File(sockDir.getDir(),"testSocketReadEof").getAbsolutePath();
  final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
  ExecutorService exeServ=Executors.newSingleThreadExecutor();
  // Restore the stripped type parameters (raw Callable/Future compiled only
  // with warnings); also drop the unused, already-zeroed byte[] buffer the
  // original allocated but never read.
  Callable<Void> callable=new Callable<Void>(){
    public Void call(){
      DomainSocket conn;
      try {
        conn=serv.accept();
      }
      catch ( IOException e) {
        throw new RuntimeException("unexpected IOException",e);
      }
      try {
        // Client closes without writing, so the first read must be EOF.
        Assert.assertEquals(-1,conn.getInputStream().read());
      }
      catch ( IOException e) {
        throw new RuntimeException("unexpected IOException",e);
      }
      return null;
    }
  };
  Future<Void> future=exeServ.submit(callable);
  DomainSocket conn=DomainSocket.connect(serv.getPath());
  Thread.sleep(50);
  conn.close();
  serv.close();
  // Rethrows any assertion failure raised in the server thread.
  future.get(2,TimeUnit.MINUTES);
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Verifies DomainSocket.shutdown(): a reader blocked on read() must first
// receive all bytes already written (3 here) and then observe EOF (-1)
// rather than an IOException once the writing side is shut down.
@Test(timeout=180000) public void testShutdown() throws Exception {
final AtomicInteger bytesRead=new AtomicInteger(0);
final AtomicBoolean failed=new AtomicBoolean(false);
final DomainSocket[] socks=DomainSocket.socketpair();
Runnable reader=new Runnable(){
@Override public void run(){
while (true) {
try {
int ret=socks[1].getInputStream().read();
if (ret == -1) return;
bytesRead.addAndGet(1);
}
catch ( IOException e) {
// Any IOException here is a test failure, recorded for the main thread.
DomainSocket.LOG.error("reader error",e);
failed.set(true);
return;
}
}
}
}
;
Thread readerThread=new Thread(reader);
readerThread.start();
socks[0].getOutputStream().write(1);
socks[0].getOutputStream().write(2);
socks[0].getOutputStream().write(3);
Assert.assertTrue(readerThread.isAlive());
// shutdown() should wake the blocked read with EOF, letting the thread exit.
socks[0].shutdown();
readerThread.join();
Assert.assertFalse(failed.get());
Assert.assertEquals(3,bytesRead.get());
IOUtils.cleanup(null,socks);
}
Class: org.apache.hadoop.nfs.TestNfsExports
InternalCallVerifierEqualityVerifier
// Exercises NfsExports with two export specs: "192.168.0.[0-9]+" (read-only
// by default) and "[a-z]+.b.com rw". The first matching rule determines the
// access privilege, and cached match results expire after
// shortExpirationPeriod (1 second, expressed in nanoseconds).
@Test public void testMultiMatchers() throws Exception {
long shortExpirationPeriod=1 * 1000 * 1000* 1000;
NfsExports matcher=new NfsExports(CacheSize,shortExpirationPeriod,"192.168.0.[0-9]+;[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname2));
Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,address1));
Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address2,hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address2,hostname2));
Thread.sleep(1000);
AccessPrivilege ap;
long startNanos=System.nanoTime();
// Poll (up to ~5s) until the cached entry expires and the lookup for a
// non-matching address/host pair returns NONE.
do {
ap=matcher.getAccessPrivilege(address2,address2);
if (ap == AccessPrivilege.NONE) {
break;
}
Thread.sleep(500);
}
 while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE,ap);
}
APIUtilityVerifierEqualityVerifier
// Parses a static uid/gid map file covering comments (full-line, trailing,
// without preceding whitespace), blank lines, tab separators, and the
// pass-through behavior for unmapped ids.
@Test public void testStaticMapParsing() throws IOException {
  File tempStaticMapFile=File.createTempFile("nfs-",".map");
  // Ensure the temp file does not accumulate across test runs.
  tempStaticMapFile.deleteOnExit();
  final String staticMapFileContents="uid 10 100\n" + "gid 10 200\n" + "uid 11 201 # comment at the end of a line\n"+ "uid 12 301\n"+ "# Comment at the beginning of a line\n"+ " # Comment that starts late in the line\n"+ "uid 10000 10001# line without whitespace before comment\n"+ "uid 13 302\n"+ "gid\t11\t201\n"+ "\n"+ "gid 12 202";
  // try-with-resources closes the stream even if write() throws (the
  // original leaked the stream on failure).
  try (OutputStream out=new FileOutputStream(tempStaticMapFile)) {
    out.write(staticMapFileContents.getBytes());
  }
  StaticMapping parsedMap=IdUserGroup.parseStaticMap(tempStaticMapFile);
  assertEquals(10,(int)parsedMap.uidMapping.get(100));
  assertEquals(11,(int)parsedMap.uidMapping.get(201));
  assertEquals(12,(int)parsedMap.uidMapping.get(301));
  assertEquals(13,(int)parsedMap.uidMapping.get(302));
  assertEquals(10,(int)parsedMap.gidMapping.get(200));
  assertEquals(11,(int)parsedMap.gidMapping.get(201));
  assertEquals(12,(int)parsedMap.gidMapping.get(202));
  assertEquals(10000,(int)parsedMap.uidMapping.get(10001));
  // An id with no entry in the map must map to itself.
  assertEquals(1000,(int)parsedMap.uidMapping.get(1000));
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Verifies negative group-lookup caching: once a lookup for `user` fails,
// the failure is cached for HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS (2s
// here), so lookups keep failing even after the backing mapping recovers.
// Advancing the FakeTimer past the expiry makes the lookup succeed again.
@Test public void testNegativeGroupCaching() throws Exception {
final String user="negcache";
final String failMessage="Did not throw IOException: ";
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,2);
FakeTimer timer=new FakeTimer();
Groups groups=new Groups(conf,timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
// Blacklisting makes FakeGroupMapping report no groups for this user.
FakeGroupMapping.addToBlackList(user);
try {
groups.getGroups(user);
fail(failMessage + "Failed to obtain groups from FakeGroupMapping.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
try {
groups.getGroups(user);
fail(failMessage + "The user is in the negative cache.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
// Even after the mapping recovers, the negative cache entry still applies.
FakeGroupMapping.clearBlackList();
try {
groups.getGroups(user);
fail(failMessage + "The user is still in the negative cache, even " + "FakeGroupMapping has resumed.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
// Move the fake clock past the 2s expiry; the lookup must now succeed.
timer.advance(4 * 1000);
assertEquals(Arrays.asList(myGroups),groups.getGroups(user));
}
APIUtilityVerifierIterativeVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * getGroups() results must stay stable while cached, change after the
 * "-refreshUserToGroupsMappings" dfsadmin command, and change again after
 * the configured cache timeout elapses.
 */
@Test public void testGroupMappingRefresh() throws Exception {
  DFSAdmin admin=new DFSAdmin(config);
  String[] args=new String[]{"-refreshUserToGroupsMappings"};
  Groups groups=Groups.getUserToGroupsMappingService(config);
  String user=UserGroupInformation.getCurrentUser().getUserName();
  System.out.println("first attempt:");
  // Restore the stripped List<String> type parameters (Groups.getGroups
  // returns List<String>).
  List<String> g1=groups.getGroups(user);
  String[] str_groups=new String[g1.size()];
  g1.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  System.out.println("second attempt, should be same:");
  List<String> g2=groups.getGroups(user);
  g2.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i=0; i < g2.size(); i++) {
    assertEquals("Should be same group ",g1.get(i),g2.get(i));
  }
  // Force a reload via dfsadmin; subsequent lookups must differ.
  admin.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  List<String> g3=groups.getGroups(user);
  g3.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i=0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
  }
  // Wait out the cache timeout (plus 10% slack) for an automatic reload.
  Thread.sleep(groupRefreshTimeoutSec * 1100);
  System.out.println("fourth attempt(after timeout), should be different:");
  List<String> g4=groups.getGroups(user);
  g4.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i=0; i < g4.size(); i++) {
    assertFalse("Should be different group ",g3.get(i).equals(g4.get(i)));
  }
}
InternalCallVerifierEqualityVerifier
/**
 * In some scenarios, such as HA, delegation tokens are associated with a
 * logical name. The tokens are cloned and are associated with the
 * physical address of the server where the service is provided.
 * This test ensures cloned delegated tokens are locally used
 * and are not returned in {@link UserGroupInformation#getCredentials()}
 */
@Test public void testPrivateTokenExclusion() throws Exception {
  UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
  TestTokenIdentifier tokenId=new TestTokenIdentifier();
  // Restore the generic type parameters stripped from this block (the raw
  // "Collection>" form did not even parse).
  Token<TestTokenIdentifier> token=new Token<TestTokenIdentifier>(tokenId.getBytes(),"password".getBytes(),tokenId.getKind(),null);
  ugi.addToken(new Text("regular-token"),token);
  // Private (cloned) tokens must not be visible through getCredentials().
  ugi.addToken(new Text("private-token"),new Token.PrivateToken<TestTokenIdentifier>(token));
  ugi.addToken(new Text("private-token1"),new Token.PrivateToken<TestTokenIdentifier>(token));
  Collection<Token<? extends TokenIdentifier>> tokens=ugi.getCredentials().getAllTokens();
  // Only the one regular token should be returned.
  assertEquals(1,tokens.size());
}
IterativeVerifierUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
* This test checks a race condition between getting and adding tokens for
* the current user. Calling UserGroupInformation.getCurrentUser() returns
* a new object each time, so simply making these methods synchronized was not
* enough to prevent race conditions and causing a
* ConcurrentModificationException. These methods are synchronized on the
* Subject, which is the same object between UserGroupInformation instances.
* This test tries to cause a CME, by exposing the race condition. Previously
* this test would fail every time; now it does not.
*/
@Test public void testTokenRaceCondition() throws Exception {
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
userGroupInfo.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
assertNotEquals(UserGroupInformation.getLoginUser(),UserGroupInformation.getCurrentUser());
GetTokenThread thread=new GetTokenThread();
try {
thread.start();
for (int i=0; i < 100; i++) {
@SuppressWarnings("unchecked") Token extends TokenIdentifier> t=mock(Token.class);
when(t.getService()).thenReturn(new Text("t" + i));
UserGroupInformation.getCurrentUser().addToken(t);
assertNull("ConcurrentModificationException encountered",thread.cme);
}
}
catch ( ConcurrentModificationException cme) {
cme.printStackTrace();
fail("ConcurrentModificationException encountered");
}
finally {
thread.runThread=false;
thread.join(5 * 1000);
}
return null;
}
}
);
}
APIUtilityVerifierBranchVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
// Only principals matching the supplied pattern ("HTTP/.*") may appear in
// the result, and every matching test principal must be present.
@Test public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
  createKeyTab(testKeytab,testPrincipals);
  Pattern httpPattern=Pattern.compile("HTTP/.*");
  String[] httpPrincipals=KerberosUtil.getPrincipalNames(testKeytab,httpPattern);
  Assert.assertNotNull("principals cannot be null",httpPrincipals);
  int expectedSize=0;
  // Parameterize the list (was a raw List).
  List<String> httpPrincipalList=Arrays.asList(httpPrincipals);
  for ( String principal : testPrincipals) {
    if (httpPattern.matcher(principal).matches()) {
      Assert.assertTrue("missing principal " + principal,httpPrincipalList.contains(principal));
      expectedSize++;
    }
  }
  // No extra (non-matching) principals may be returned.
  Assert.assertEquals(expectedSize,httpPrincipals.length);
}
APIUtilityVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
// Without a filter pattern, every test principal written to the keytab must
// be returned, and nothing else.
@Test public void testGetPrincipalNamesFromKeytab() throws IOException {
  createKeyTab(testKeytab,testPrincipals);
  String[] principals=KerberosUtil.getPrincipalNames(testKeytab);
  Assert.assertNotNull("principals cannot be null",principals);
  int expectedSize=0;
  // Parameterize the list (was a raw List).
  List<String> principalList=Arrays.asList(principals);
  for ( String principal : testPrincipals) {
    Assert.assertTrue("missing principal " + principal,principalList.contains(principal));
    expectedSize++;
  }
  Assert.assertEquals(expectedSize,principals.length);
}
EqualityVerifier
// getServicePrincipal(): null, empty, and "0.0.0.0" hostnames fall back to
// the (lower-cased) local hostname; explicit hostnames are lower-cased.
@Test public void testGetServerPrincipal() throws IOException {
  String service="TestKerberosUtil";
  String localHostname=KerberosUtil.getLocalHostName();
  String testHost="FooBar";
  final String localPrincipal=service + "/" + localHostname.toLowerCase();
  final String hostPrincipal=service + "/" + testHost.toLowerCase();
  Assert.assertEquals("When no hostname is sent",localPrincipal,KerberosUtil.getServicePrincipal(service,null));
  Assert.assertEquals("When empty hostname is sent",localPrincipal,KerberosUtil.getServicePrincipal(service,""));
  Assert.assertEquals("When 0.0.0.0 hostname is sent",localPrincipal,KerberosUtil.getServicePrincipal(service,"0.0.0.0"));
  Assert.assertEquals("When uppercase hostname is sent",hostPrincipal,KerberosUtil.getServicePrincipal(service,testHost));
  Assert.assertEquals("When lowercase hostname is sent",hostPrincipal,KerberosUtil.getServicePrincipal(service,testHost.toLowerCase()));
}
InternalCallVerifierEqualityVerifier
// Adding an already-STARTED sibling to the parent during the parent's own
// start() must leave the parent with both services registered.
@Test(timeout=1000) public void testAddStartedSiblingInStart() throws Throwable {
CompositeService parent=new CompositeService("parent");
BreakableService sibling=new BreakableService();
sibling.init(new Configuration());
sibling.start();
// AddSiblingService registers `sibling` with `parent` when it reaches STARTED.
parent.addService(new AddSiblingService(parent,sibling,STATE.STARTED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",2,parent.getServices().size());
}
IterativeVerifierBranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// Starts NUM_OF_SERVICES child services where service FAILED_SERVICE_SEQ_NUMBER
// throws on start; verifies the manager rolls back: started services are
// stopped, never-started ones stay INITED.
@Test public void testServiceStartup(){
ServiceManager serviceManager=new ServiceManager("ServiceManager");
for (int i=0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service=new CompositeServiceImpl(i);
if (i == FAILED_SERVICE_SEQ_NUMBER) {
service.setThrowExceptionOnStart(true);
}
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services=serviceManager.getServices().toArray(new CompositeServiceImpl[0]);
Configuration conf=new Configuration();
serviceManager.init(conf);
try {
serviceManager.start();
fail("Exception should have been thrown due to startup failure of last service");
}
catch ( ServiceTestRuntimeException e) {
for (int i=0; i < NUM_OF_SERVICES - 1; i++) {
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
// NOTE(review): this asserts on services[NUM_OF_SERVICES - 1] for every
// such i, not services[i] — looks like it was meant to be services[i];
// confirm against upstream before changing.
assertEquals("Service state should have been ",STATE.INITED,services[NUM_OF_SERVICES - 1].getServiceState());
}
else {
assertEquals("Service state should have been ",STATE.STOPPED,services[i].getServiceState());
}
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// addIfService() must reject non-Service objects, and removeService() must
// drop a previously added child: three added, one removed, two remain.
@Test public void testRemoveService(){
  CompositeService testService=new CompositeService("TestService"){
    @Override public void serviceInit( Configuration conf){
      // Integer.valueOf avoids the deprecated Integer(int) constructor.
      Integer notAService=Integer.valueOf(0);
      assertFalse("Added an integer as a service",addIfService(notAService));
      Service service1=new AbstractService("Service1"){
      };
      addIfService(service1);
      Service service2=new AbstractService("Service2"){
      };
      addIfService(service2);
      Service service3=new AbstractService("Service3"){
      };
      addIfService(service3);
      removeService(service1);
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services",2,testService.getServices().size());
}
InternalCallVerifierEqualityVerifier
// Adding an already-STARTED sibling during the parent's stop() must still
// leave the parent with both services registered.
@Test(timeout=1000) public void testAddStartedSiblingInStop() throws Throwable {
CompositeService parent=new CompositeService("parent");
BreakableService sibling=new BreakableService();
sibling.init(new Configuration());
sibling.start();
// AddSiblingService registers `sibling` with `parent` when it reaches STOPPED.
parent.addService(new AddSiblingService(parent,sibling,STATE.STOPPED));
parent.init(new Configuration());
parent.start();
parent.stop();
assertEquals("Incorrect number of services",2,parent.getServices().size());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// addIfService() must reject a non-Service object and accept a Service,
// leaving exactly one registered child.
@Test(timeout=1000) public void testAddIfService(){
  CompositeService testService=new CompositeService("TestService"){
    Service service;
    @Override public void serviceInit( Configuration conf){
      // Integer.valueOf avoids the deprecated Integer(int) constructor.
      Integer notAService=Integer.valueOf(0);
      assertFalse("Added an integer as a service",addIfService(notAService));
      service=new AbstractService("Service"){
      };
      assertTrue("Unable to add a service",addIfService(service));
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services",1,testService.getServices().size());
}
InternalCallVerifierEqualityVerifier
// Adding an uninitialized sibling during the parent's start(): the sibling
// is registered but must still be NOTINITED right after the parent's init().
@Test(timeout=1000) public void testAddUninitedSiblingInStart() throws Throwable {
CompositeService parent=new CompositeService("parent");
BreakableService sibling=new BreakableService();
parent.addService(new AddSiblingService(parent,sibling,STATE.STARTED));
parent.init(new Configuration());
// The sibling is only added at STARTED, so it is untouched by init().
assertInState(STATE.NOTINITED,sibling);
parent.start();
parent.stop();
assertEquals("Incorrect number of services",2,parent.getServices().size());
}
InternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Test that the {@link BreakableStateChangeListener} is picking up
 * the state changes and that its last event field is as expected.
 *
 * Each lifecycle transition (init, start, stop) must raise exactly one
 * event, carrying the service instance itself.
 */
@Test public void testEventHistory(){
register();
BreakableService service=new BreakableService();
// Before init: no events, listener still reports NOTINITED.
assertListenerState(listener,Service.STATE.NOTINITED);
assertEquals(0,listener.getEventCount());
service.init(new Configuration());
assertListenerState(listener,Service.STATE.INITED);
// The event must reference the very same service object.
assertSame(service,listener.getLastService());
assertListenerEventCount(listener,1);
service.start();
assertListenerState(listener,Service.STATE.STARTED);
assertListenerEventCount(listener,2);
service.stop();
assertListenerState(listener,Service.STATE.STOPPED);
assertListenerEventCount(listener,3);
}
UtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Show that if the service failed during an init
 * operation, stop was called.
 *
 * The BreakableService is configured to fail both init and stop
 * (constructor flags: failOnInit=true, failOnStart=false, failOnStop=true);
 * the service must still end up STOPPED with the init failure recorded as
 * the failure state/cause.
 */
@Test public void testStopFailingInitAndStop() throws Throwable {
BreakableService svc=new BreakableService(true,false,true);
svc.registerServiceListener(new LoggingStateChangeListener());
try {
svc.init(new Configuration());
fail("Expected a failure, got " + svc);
}
catch ( BreakableService.BrokenLifecycleEvent e) {
// The lifecycle event that broke is the INITED transition.
assertEquals(Service.STATE.INITED,e.state);
}
// Despite the failed init, the service must have been stopped.
assertServiceStateStopped(svc);
assertEquals(Service.STATE.INITED,svc.getFailureState());
Throwable failureCause=svc.getFailureCause();
assertNotNull("Null failure cause in " + svc,failureCause);
BreakableService.BrokenLifecycleEvent cause=(BreakableService.BrokenLifecycleEvent)failureCause;
assertNotNull("null state in " + cause + " raised by "+ svc,cause.state);
assertEquals(Service.STATE.INITED,cause.state);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * This test verifies that you can block waiting for something to happen
 * and use notifications to manage it
 * @throws Throwable on a failure
 */
@Test public void testListenerWithNotifications() throws Throwable {
// Service stops itself asynchronously after ~2000ms.
AsyncSelfTerminatingService service=new AsyncSelfTerminatingService(2000);
NotifyingListener listener=new NotifyingListener();
service.registerServiceListener(listener);
service.init(new Configuration());
service.start();
assertServiceInState(service,Service.STATE.STARTED);
long start=System.currentTimeMillis();
// Block (bounded at 20s) until the listener's state-change notify() fires.
synchronized (listener) {
listener.wait(20000);
}
long duration=System.currentTimeMillis() - start;
assertEquals(Service.STATE.STOPPED,listener.notifyingState);
assertServiceInState(service,Service.STATE.STOPPED);
// Should have been woken by the ~2s self-termination, well under 10s.
assertTrue("Duration of " + duration + " too long",duration < 10000);
}
EqualityVerifier
// The predicate is immediately true, so waitFor must return right away:
// waited time ~0 and elapsed wall time ~0 (both within a 50ms delta).
@Test public void waitFor(){
long start=Time.now();
long waited=waitFor(1000,new Predicate(){
@Override public boolean evaluate() throws Exception {
return true;
}
}
);
long end=Time.now();
assertEquals(waited,0,50);
assertEquals(end - start - waited,0,50);
}
EqualityVerifier
// With wait-for ratio 1, sleep(100) should take ~100ms (±50ms).
@Test public void sleepRatio1(){
setWaitForRatio(1);
long start=Time.now();
sleep(100);
long end=Time.now();
assertEquals(end - start,100,50);
}
EqualityVerifier
// Sleep duration scales with getWaitForRatio().
// NOTE(review): despite the "Ratio2" name this sets ratio 1 (same as
// sleepRatio1); the TestHTestCase copy does the same — confirm intent
// against upstream before changing.
@Test public void sleepRatio2(){
setWaitForRatio(1);
long start=Time.now();
sleep(100);
long end=Time.now();
assertEquals(end - start,100 * getWaitForRatio(),50 * getWaitForRatio());
}
EqualityVerifier
// With ratio 2, a never-true predicate should make waitFor return -1
// (timeout) after ~200ms scaled by the ratio.
@Test public void waitForTimeOutRatio2(){
setWaitForRatio(2);
long start=Time.now();
long waited=waitFor(200,new Predicate(){
@Override public boolean evaluate() throws Exception {
return false;
}
}
);
long end=Time.now();
// -1 signals the predicate never became true within the timeout.
assertEquals(waited,-1);
assertEquals(end - start,200 * getWaitForRatio(),50 * getWaitForRatio());
}
EqualityVerifier
// With ratio 1, a never-true predicate should make waitFor return -1
// (timeout) after ~200ms (±50ms).
@Test public void waitForTimeOutRatio1(){
setWaitForRatio(1);
long start=Time.now();
long waited=waitFor(200,new Predicate(){
@Override public boolean evaluate() throws Exception {
return false;
}
}
);
long end=Time.now();
// -1 signals the predicate never became true within the timeout.
assertEquals(waited,-1);
assertEquals(end - start,200,50);
}
Class: org.apache.hadoop.test.TestHTestCase
EqualityVerifier
// (TestHTestCase copy) Immediately-true predicate: waitFor returns at once,
// waited ~0 and elapsed ~0 (±50ms).
@Test public void waitFor(){
long start=Time.now();
long waited=waitFor(1000,new Predicate(){
@Override public boolean evaluate() throws Exception {
return true;
}
}
);
long end=Time.now();
assertEquals(waited,0,50);
assertEquals(end - start - waited,0,50);
}
EqualityVerifier
// (TestHTestCase copy) Ratio 1, never-true predicate: waitFor times out,
// returning -1 after ~200ms (±50ms).
@Test public void waitForTimeOutRatio1(){
setWaitForRatio(1);
long start=Time.now();
long waited=waitFor(200,new Predicate(){
@Override public boolean evaluate() throws Exception {
return false;
}
}
);
long end=Time.now();
assertEquals(waited,-1);
assertEquals(end - start,200,50);
}
EqualityVerifier
// (TestHTestCase copy) Ratio 2, never-true predicate: waitFor times out
// (-1) after ~200ms scaled by the ratio.
@Test public void waitForTimeOutRatio2(){
setWaitForRatio(2);
long start=Time.now();
long waited=waitFor(200,new Predicate(){
@Override public boolean evaluate() throws Exception {
return false;
}
}
);
long end=Time.now();
assertEquals(waited,-1);
assertEquals(end - start,200 * getWaitForRatio(),50 * getWaitForRatio());
}
EqualityVerifier
// (TestHTestCase copy) With ratio 1, sleep(100) takes ~100ms (±50ms).
@Test public void sleepRatio1(){
setWaitForRatio(1);
long start=Time.now();
sleep(100);
long end=Time.now();
assertEquals(end - start,100,50);
}
EqualityVerifier
// (TestHTestCase copy) Sleep duration scales with getWaitForRatio().
// NOTE(review): sets ratio 1 despite the "Ratio2" name, same as the other
// copy — confirm intent against upstream before changing.
@Test public void sleepRatio2(){
setWaitForRatio(1);
long start=Time.now();
sleep(100);
long end=Time.now();
assertEquals(end - start,100 * getWaitForRatio(),50 * getWaitForRatio());
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Creates a deliberate deadlock (Deadlock helper), polls until the JVM
// detects it, then checks that TimedOutTestsListener's failure report
// contains a thread dump and the deadlock section.
@Test(timeout=500) public void testThreadDumpAndDeadlocks() throws Exception {
new Deadlock();
String s=null;
// Deadlock detection is asynchronous; poll until buildDeadlockInfo sees it.
while (true) {
s=TimedOutTestsListener.buildDeadlockInfo();
if (s != null) break;
Thread.sleep(100);
}
// Three threads participate in the deadlock cycle.
Assert.assertEquals(3,countStringOccurrences(s,"BLOCKED"));
// A timeout-style failure must trigger the full dump in the listener output.
Failure failure=new Failure(null,new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX));
StringWriter writer=new StringWriter();
new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure);
String out=writer.toString();
Assert.assertTrue(out.contains("THREAD DUMP"));
Assert.assertTrue(out.contains("DEADLOCKS DETECTED"));
System.out.println(out);
}
Class: org.apache.hadoop.tools.GetGroupsTestBase
APIUtilityVerifierEqualityVerifier
// Unknown users are still echoed back by the tool, each with an empty
// group list, in the order given.
@Test public void testMultipleNonExistingUsers() throws Exception {
String actualOutput=runTool(conf,new String[]{"does-not-exist1","does-not-exist2"},true);
assertEquals("Show the output for only the user given, with no groups",getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1")) + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2")),actualOutput);
}
APIUtilityVerifierEqualityVerifier
// A single unknown user is echoed back with an empty group list.
@Test public void testNonExistentUser() throws Exception {
String actualOutput=runTool(conf,new String[]{"does-not-exist"},true);
assertEquals("Show the output for only the user given, with no groups",getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist")),actualOutput);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
// Two known users: the tool prints both users' groups, in argument order.
@Test public void testMultipleExistingUsers() throws Exception {
String actualOutput=runTool(conf,new String[]{testUser1.getUserName(),testUser2.getUserName()},true);
assertEquals("Show the output for both users given",getExpectedOutput(testUser1) + getExpectedOutput(testUser2),actualOutput);
}
APIUtilityVerifierEqualityVerifier
// With no arguments the tool reports the current user's groups.
@Test public void testNoUserGiven() throws Exception {
String actualOutput=runTool(conf,new String[0],true);
UserGroupInformation currentUser=UserGroupInformation.getCurrentUser();
assertEquals("No user provided should default to current user",getExpectedOutput(currentUser),actualOutput);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
// Known and unknown users mixed: output preserves argument order, with
// empty group lists for the unknown ones.
@Test public void testExistingInterleavedWithNonExistentUsers() throws Exception {
String actualOutput=runTool(conf,new String[]{"does-not-exist1",testUser1.getUserName(),"does-not-exist2",testUser2.getUserName()},true);
assertEquals("Show the output for only the user given, with no groups",getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1")) + getExpectedOutput(testUser1) + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2"))+ getExpectedOutput(testUser2),actualOutput);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
// A single known user: the tool prints exactly that user's groups.
@Test public void testExistingUser() throws Exception {
String actualOutput=runTool(conf,new String[]{testUser1.getUserName()},true);
assertEquals("Show only the output of the user given",getExpectedOutput(testUser1),actualOutput);
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test the main method of DistCp. It is expected to call System.exit()
 * (intercepted by the test harness as an {@link ExitException} with
 * status 0) and to clean up its staging directory on the way out.
 */
@Test public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf=getConf();
  Path stagingDir=JobSubmissionFiles.getStagingDir(new Cluster(conf),conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  // Fixed local-variable typo: was "soure".
  Path source=createFile("tmp.txt");
  Path target=createFile("target.txt");
  try {
    String[] arg={target.toString(),source.toString()};
    DistCp.main(arg);
    Assert.fail();
  }
  catch ( ExitException t) {
    Assert.assertTrue(fs.exists(target));
    // JUnit convention: expected value first (the original had them swapped).
    Assert.assertEquals(0,t.status);
    // The staging dir must be empty after the run.
    Assert.assertEquals(0,stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
Class: org.apache.hadoop.tools.TestHadoopArchives
BranchVerifierInternalCallVerifierEqualityVerifierPublicFieldVerifier
// Builds a tree of files (names with spaces, a binary file, a zero-length
// file), archives it, then re-reads every file through HarFileSystem using
// several read strategies (simple read, buffered, readFully, seek, read4,
// skip) and checks they all return identical, correct content.
@Test public void testReadFileContent() throws Exception {
fileList.add(createFile(inputPath,fs,"c c"));
final Path sub1=new Path(inputPath,"sub 1");
fs.mkdirs(sub1);
fileList.add(createFile(inputPath,fs,sub1.getName(),"file x y z"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"file"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"x"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"y"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"z"));
final Path sub2=new Path(inputPath,"sub 1 with suffix");
fs.mkdirs(sub2);
fileList.add(createFile(inputPath,fs,sub2.getName(),"z"));
// Binary and empty files cover non-text content edge cases.
final byte[] binContent=prepareBin();
fileList.add(createFile(inputPath,fs,binContent,sub2.getName(),"bin"));
fileList.add(createFile(inputPath,fs,new byte[0],sub2.getName(),"zero-length"));
final String fullHarPathStr=makeArchive();
final HarFileSystem harFileSystem=new HarFileSystem(fs);
try {
final URI harUri=new URI(fullHarPathStr);
harFileSystem.initialize(harUri,fs.getConf());
int readFileCount=0;
for ( final String pathStr0 : fileList) {
final Path path=new Path(fullHarPathStr + Path.SEPARATOR + pathStr0);
final String baseName=path.getName();
final FileStatus status=harFileSystem.getFileStatus(path);
if (status.isFile()) {
// All read strategies must agree byte-for-byte.
final byte[] actualContentSimple=readAllSimple(harFileSystem.open(path),true);
final byte[] actualContentBuffer=readAllWithBuffer(harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentBuffer);
final byte[] actualContentFully=readAllWithReadFully(actualContentSimple.length,harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentFully);
final byte[] actualContentSeek=readAllWithSeek(actualContentSimple.length,harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentSeek);
final byte[] actualContentRead4=readAllWithRead4(harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentRead4);
final byte[] actualContentSkip=readAllWithSkip(actualContentSimple.length,harFileSystem.open(path),harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentSkip);
if ("bin".equals(baseName)) {
assertArrayEquals(binContent,actualContentSimple);
}
else if ("zero-length".equals(baseName)) {
assertEquals(0,actualContentSimple.length);
}
else {
// Text files were created with their base name as their content.
String actual=new String(actualContentSimple,"UTF-8");
assertEquals(baseName,actual);
}
readFileCount++;
}
}
// Every file in the list must have been found and read from the archive.
assertEquals(fileList.size(),readFileCount);
}
finally {
harFileSystem.close();
}
}
APIUtilityVerifierEqualityVerifier
// Archives a tree referenced via a relative path and checks the har
// listing matches the original listing.
@Test public void testRelativePath() throws Exception {
  final Path sub1=new Path(inputPath,"dir1");
  fs.mkdirs(sub1);
  createFile(inputPath,fs,sub1.getName(),"a");
  final FsShell shell=new FsShell(conf);
  // Restore the stripped List<String> type parameters (lsr returns the
  // listing as strings).
  final List<String> originalPaths=lsr(shell,"input");
  System.out.println("originalPaths: " + originalPaths);
  final String fullHarPathStr=makeArchive();
  final List<String> harPaths=lsr(shell,fullHarPathStr);
  Assert.assertEquals(originalPaths,harPaths);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// copyToLocalFile from a har must materialize the file on the local FS with
// the expected length (file "a" was created with 1 byte of content).
@Test public void testCopyToLocal() throws Exception {
final String fullHarPathStr=makeArchive();
final String tmpDir=System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
final Path tmpPath=new Path(tmpDir);
final LocalFileSystem localFs=FileSystem.getLocal(new Configuration());
// Start from a clean scratch directory.
localFs.delete(tmpPath,true);
localFs.mkdirs(tmpPath);
assertTrue(localFs.exists(tmpPath));
final HarFileSystem harFileSystem=new HarFileSystem(fs);
try {
final URI harUri=new URI(fullHarPathStr);
harFileSystem.initialize(harUri,fs.getConf());
final Path sourcePath=new Path(fullHarPathStr + Path.SEPARATOR + "a");
final Path targetPath=new Path(tmpPath,"straus");
harFileSystem.copyToLocalFile(false,sourcePath,targetPath);
FileStatus straus=localFs.getFileStatus(targetPath);
assertEquals(1,straus.getLen());
}
finally {
harFileSystem.close();
localFs.delete(tmpPath,true);
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
// Archives a tree whose directory and file names contain spaces, then
// verifies the har listing matches the original listing.
@Test public void testPathWithSpaces() throws Exception {
  createFile(inputPath,fs,"c c");
  final Path sub1=new Path(inputPath,"sub 1");
  fs.mkdirs(sub1);
  createFile(sub1,fs,"file x y z");
  createFile(sub1,fs,"file");
  createFile(sub1,fs,"x");
  createFile(sub1,fs,"y");
  createFile(sub1,fs,"z");
  final Path sub2=new Path(inputPath,"sub 1 with suffix");
  fs.mkdirs(sub2);
  createFile(sub2,fs,"z");
  final FsShell shell=new FsShell(conf);
  final String inputPathStr=inputPath.toUri().getPath();
  // Restore the stripped List<String> type parameters (lsr returns the
  // listing as strings).
  final List<String> originalPaths=lsr(shell,inputPathStr);
  final String fullHarPathStr=makeArchive();
  final List<String> harPaths=lsr(shell,fullHarPathStr);
  Assert.assertEquals(originalPaths,harPaths);
}
APIUtilityVerifierNullVerifierEqualityVerifierHybridVerifier
// ApplicationClassLoader must serve a resource from its own jar even though
// the parent classloader cannot see it.
@Test public void testGetResource() throws IOException {
URL testJar=makeTestJar().toURI().toURL();
ClassLoader currentClassLoader=getClass().getClassLoader();
ClassLoader appClassloader=new ApplicationClassLoader(new URL[]{testJar},currentClassLoader,null);
// Sanity check: the resource only exists inside the test jar.
assertNull("Resource should be null for current classloader",currentClassLoader.getResourceAsStream("resource.txt"));
InputStream in=appClassloader.getResourceAsStream("resource.txt");
assertNotNull("Resource should not be null for app classloader",in);
assertEquals("hello",IOUtils.toString(in));
}
IterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Inserts NUM_KEYS distinct keys, visits them all, removes them all, and
// checks the store ends empty. Restores the stripped <Key, Integer> type
// parameters: the raw Visitor with a typed accept(Key, Integer) override
// could not compile against the generic interface.
@Test(timeout=60000) public void testAdditionsAndRemovals(){
  IdentityHashStore<Key, Integer> store=new IdentityHashStore<Key, Integer>(0);
  final int NUM_KEYS=1000;
  LOG.debug("generating " + NUM_KEYS + " keys");
  final List<Key> keys=new ArrayList<Key>(NUM_KEYS);
  for (int i=0; i < NUM_KEYS; i++) {
    keys.add(new Key("key " + i));
  }
  for (int i=0; i < NUM_KEYS; i++) {
    store.put(keys.get(i),i);
  }
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.assertTrue(keys.contains(k));
    }
  });
  for (int i=0; i < NUM_KEYS; i++) {
    Assert.assertEquals(Integer.valueOf(i),store.remove(keys.get(i)));
  }
  store.visitAll(new Visitor<Key, Integer>(){
    @Override public void accept( Key k, Integer v){
      Assert.fail("expected all entries to be removed");
    }
  });
  Assert.assertTrue("expected the store to be " + "empty, but found " + store.numElements() + " elements.",store.isEmpty());
  // NOTE(review): 1024 assumes the internal capacity grew to hold 1000
  // entries and is not shrunk by removals — confirm against
  // IdentityHashStore's growth policy.
  Assert.assertEquals(1024,store.capacity());
}
UtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * An IdentityHashStore created with zero capacity must behave as an empty
 * store (visitAll sees nothing) and still accept a put/get/remove cycle.
 */
@Test(timeout=60000) public void testStartingWithZeroCapacity(){
IdentityHashStore store=new IdentityHashStore(0);
// An empty store must not invoke the visitor at all.
store.visitAll(new Visitor(){
@Override public void accept( Key k, Integer v){
Assert.fail("found key " + k + " in empty IdentityHashStore.");
}
}
);
Assert.assertTrue(store.isEmpty());
final Key key1=new Key("key1");
// Integer.valueOf over the deprecated always-allocating Integer(int) ctor;
// value identity is irrelevant here (the store is identity-keyed, and the
// assertions below compare values with equals()).
Integer value1=Integer.valueOf(100);
store.put(key1,value1);
Assert.assertTrue(!store.isEmpty());
Assert.assertEquals(value1,store.get(key1));
// The single entry must be the only one the visitor observes.
store.visitAll(new Visitor(){
@Override public void accept( Key k, Integer v){
Assert.assertEquals(key1,k);
}
}
);
// Removing the entry returns its value and empties the store again.
Assert.assertEquals(value1,store.remove(key1));
Assert.assertTrue(store.isEmpty());
}
IterativeVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * The store keys on object identity, not equals(): an equal-but-distinct key
 * misses, and putting the same key three times yields three separate entries
 * that are removed one by one.
 */
@Test(timeout=60000) public void testDuplicateInserts(){
IdentityHashStore store=new IdentityHashStore(4);
store.visitAll(new Visitor(){
@Override public void accept( Key k, Integer v){
Assert.fail("found key " + k + " in empty IdentityHashStore.");
}
}
);
Assert.assertTrue(store.isEmpty());
Key key1=new Key("key1");
// Integer.valueOf instead of the deprecated Integer(int) constructor;
// values are compared by equals() below, so caching is harmless.
Integer value1=Integer.valueOf(100);
Integer value2=Integer.valueOf(200);
Integer value3=Integer.valueOf(300);
store.put(key1,value1);
// An equal (by equals()) but distinct key object must NOT hit: lookups
// are identity-based.
Key equalToKey1=new Key("key1");
Assert.assertNull(store.get(equalToKey1));
Assert.assertTrue(!store.isEmpty());
Assert.assertEquals(value1,store.get(key1));
// Re-putting the same key adds entries rather than replacing.
store.put(key1,value2);
store.put(key1,value3);
final List allValues=new LinkedList();
store.visitAll(new Visitor(){
@Override public void accept( Key k, Integer v){
allValues.add(v);
}
}
);
Assert.assertEquals(3,allValues.size());
// Each remove pops one of the three values (order unspecified).
for (int i=0; i < 3; i++) {
Integer value=store.remove(key1);
Assert.assertTrue(allValues.remove(value));
}
Assert.assertNull(store.remove(key1));
Assert.assertTrue(store.isEmpty());
}
Class: org.apache.hadoop.util.TestLightWeightGSet
InternalCallVerifierEqualityVerifier
/**
 * Draining a LightWeightGSet through Iterator.remove must leave size() == 0.
 */
@Test(timeout=60000) public void testRemoveAllViaIterator(){
  ArrayList values=getRandomList(100,123);
  LightWeightGSet set=new LightWeightGSet(16);
  for ( Integer value : values) {
    set.put(new TestElement(value));
  }
  Iterator iter=set.iterator();
  while (iter.hasNext()) {
    iter.next();
    iter.remove();
  }
  Assert.assertEquals(0,set.size());
}
EqualityVerifier
/**
 * simpleHostname strips the domain from an FQDN, passes a bare hostname
 * through unchanged, and must not truncate dotted IPv4 addresses.
 */
@Test(timeout=30000) public void testSimpleHostName(){
assertEquals("Should return hostname when FQDN is specified","hadoop01",StringUtils.simpleHostname("hadoop01.domain.com"));
assertEquals("Should return hostname when only hostname is specified","hadoop01",StringUtils.simpleHostname("hadoop01"));
assertEquals("Should not truncate when IP address is passed","10.10.5.68",StringUtils.simpleHostname("10.10.5.68"));
}
EqualityVerifier
/**
 * StringUtils.join over growing prefixes of ["a","b","c"] — including the
 * empty sublist — must place the separator between elements only.
 */
@Test(timeout=30000) public void testJoin(){
  List parts=new ArrayList();
  for ( String element : new String[]{"a","b","c"}) {
    parts.add(element);
  }
  assertEquals("",StringUtils.join(":",parts.subList(0,0)));
  assertEquals("a",StringUtils.join(":",parts.subList(0,1)));
  assertEquals("a:b",StringUtils.join(":",parts.subList(0,2)));
  assertEquals("a:b:c",StringUtils.join(":",parts.subList(0,3)));
}
EqualityVerifier
/**
 * StringUtils.split(s, '/') must agree with String.split("/") across plain
 * paths, trailing/leading slash runs, the empty string, and slash-only input.
 */
@Test(timeout=30000) public void testSimpleSplit() throws Exception {
  final String[] inputs={"a/b/c","a/b/c////","///a/b/c","","/","////"};
  for (int i=0; i < inputs.length; i++) {
    final String testSubject=inputs[i];
    assertArrayEquals("Testing '" + testSubject + "'",testSubject.split("/"),StringUtils.split(testSubject,'/'));
  }
}
UtilityVerifierEqualityVerifierHybridVerifier
/**
 * stringToURI must reject the malformed URI string "file://" (empty
 * authority/path) by throwing IllegalArgumentException with a descriptive
 * message rather than swallowing the underlying URISyntaxException.
 */
@Test(timeout=30000) public void testStringToURI(){
String[] str=new String[]{"file://"};
try {
StringUtils.stringToURI(str);
// Reached only if the malformed string was silently accepted.
fail("Ignoring URISyntaxException while creating URI from string file://");
}
catch ( IllegalArgumentException iae) {
assertEquals("Failed to create uri for file://",iae.getMessage());
}
}
APIUtilityVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Validate behavior of chmod commands on directories on Windows:
 * read permission gates listing, write permission gates create/rename
 * (but, Windows-specifically, not delete), and execute permission is not
 * needed for listing.
 */
@Test(timeout=30000) public void testBasicChmodOnDir() throws IOException {
File a=new File(TEST_DIR,"a");
File b=new File(a,"b");
a.mkdirs();
assertTrue(b.createNewFile());
// Without read permission, File.list() returns null.
chmod("300",a);
String[] files=a.list();
// assertNull instead of assertTrue(null == files): same check, clearer failure.
assertNull("Listing a directory without read permission should fail",files);
chmod("700",a);
files=a.list();
assertNotNull("Listing a readable directory should succeed",files);
assertEquals("b",files[0]);
// Read+execute only: no write permission, so creating a child must fail.
chmod("500",a);
File c=new File(a,"c");
try {
c.createNewFile();
// fail() instead of assertFalse("...", true): this line is unreachable on success.
fail("writeFile should have failed!");
}
catch ( IOException ex) {
// Message previously claimed "577"; the directory mode set above is 500.
LOG.info("Expected: Failed to create a file when directory " + "permissions are 500");
}
assertTrue("Special behavior: deleting a file will succeed on Windows " + "even if a user does not have write permissions on the parent dir",b.delete());
assertFalse("Renaming a file should fail on the dir where a user does " + "not have write permissions",b.renameTo(new File(a,"d")));
// Restore write permission; create and rename must now succeed.
chmod("700",a);
assertTrue(c.createNewFile());
File d=new File(a,"d");
assertTrue(c.renameTo(d));
// Read+write without execute still allows listing on Windows.
chmod("600",a);
files=a.list();
assertEquals("d",files[0]);
assertTrue(d.delete());
File e=new File(a,"e");
assertTrue(e.createNewFile());
assertTrue(e.renameTo(new File(a,"f")));
chmod("700",a);
}
Class: org.apache.hadoop.util.TestZKUtil
APIUtilityVerifierEqualityVerifier
/**
 * removeSpecificPerms must clear exactly the requested permission bit:
 * starting from Perms.ALL, the CREATE bit must be absent afterwards.
 */
@Test public void testRemoveSpecificPerms(){
  final int allPerms=Perms.ALL;
  final int permToRemove=Perms.CREATE;
  int resultPerms=ZKUtil.removeSpecificPerms(allPerms,permToRemove);
  assertEquals("Removal failed",0,resultPerms & Perms.CREATE);
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * parseAuth splits a comma-separated (whitespace/newline tolerant) list of
 * scheme:auth entries, separating scheme from auth data on the first ':' only.
 */
@Test public void testGoodAuths(){
List result=ZKUtil.parseAuth("scheme:data,\n scheme2:user:pass");
assertEquals(2,result.size());
ZKAuthInfo auth0=result.get(0);
assertEquals("scheme",auth0.getScheme());
assertEquals("data",new String(auth0.getAuth()));
ZKAuthInfo auth1=result.get(1);
assertEquals("scheme2",auth1.getScheme());
// Only the first ':' is a separator — the auth payload may itself contain ':'.
assertEquals("user:pass",new String(auth1.getAuth()));
}
InternalCallVerifierEqualityVerifier
/**
 * Test RegisterNodeManagerResponsePBImpl. Test getters and setters. The
 * RegisterNodeManagerResponsePBImpl should generate a prototype and data
 * restore from prototype
 */
@Test public void testRegisterNodeManagerResponsePBImpl(){
RegisterNodeManagerResponsePBImpl original=new RegisterNodeManagerResponsePBImpl();
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNodeAction(NodeAction.NORMAL);
original.setDiagnosticsMessage("testDiagnosticMessage");
// Round-trip via the protobuf form; every field must survive.
RegisterNodeManagerResponsePBImpl copy=new RegisterNodeManagerResponsePBImpl(original.getProto());
// keyId 1 presumably comes from the getMasterKey() fixture -- TODO confirm
assertEquals(1,copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1,copy.getNMTokenMasterKey().getKeyId());
assertEquals(NodeAction.NORMAL,copy.getNodeAction());
assertEquals("testDiagnosticMessage",copy.getDiagnosticsMessage());
}
InternalCallVerifierEqualityVerifier
/**
 * Populates every filter field of a GetApplicationsRequest (states, tags,
 * types, start/finish ranges, limit, queues, users, scope), round-trips it
 * through its protobuf form, and verifies each field survives.
 */
@Test public void testGetApplicationsRequest(){
GetApplicationsRequest request=GetApplicationsRequest.newInstance();
EnumSet appStates=EnumSet.of(YarnApplicationState.ACCEPTED);
request.setApplicationStates(appStates);
Set tags=new HashSet();
tags.add("tag1");
request.setApplicationTags(tags);
Set types=new HashSet();
types.add("type1");
request.setApplicationTypes(types);
// Ranges are built from the current clock; only begin <= end matters here.
long startBegin=System.currentTimeMillis();
long startEnd=System.currentTimeMillis() + 1;
request.setStartRange(startBegin,startEnd);
long finishBegin=System.currentTimeMillis() + 2;
long finishEnd=System.currentTimeMillis() + 3;
request.setFinishRange(finishBegin,finishEnd);
long limit=100L;
request.setLimit(limit);
Set queues=new HashSet();
queues.add("queue1");
request.setQueues(queues);
Set users=new HashSet();
users.add("user1");
request.setUsers(users);
ApplicationsRequestScope scope=ApplicationsRequestScope.ALL;
request.setScope(scope);
// Reconstruct from the serialized proto and compare field by field.
GetApplicationsRequest requestFromProto=new GetApplicationsRequestPBImpl(((GetApplicationsRequestPBImpl)request).getProto());
Assert.assertEquals(requestFromProto,request);
Assert.assertEquals("ApplicationStates from proto is not the same with original request",requestFromProto.getApplicationStates(),appStates);
Assert.assertEquals("ApplicationTags from proto is not the same with original request",requestFromProto.getApplicationTags(),tags);
Assert.assertEquals("ApplicationTypes from proto is not the same with original request",requestFromProto.getApplicationTypes(),types);
Assert.assertEquals("StartRange from proto is not the same with original request",requestFromProto.getStartRange(),new LongRange(startBegin,startEnd));
Assert.assertEquals("FinishRange from proto is not the same with original request",requestFromProto.getFinishRange(),new LongRange(finishBegin,finishEnd));
Assert.assertEquals("Limit from proto is not the same with original request",requestFromProto.getLimit(),limit);
Assert.assertEquals("Queues from proto is not the same with original request",requestFromProto.getQueues(),queues);
Assert.assertEquals("Users from proto is not the same with original request",requestFromProto.getUsers(),users);
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * CancelDelegationTokenRequestPBImpl must survive a round trip through its
 * protobuf representation: the delegation token set on the original is
 * present and equal on the reconstructed copy.
 */
@Test public void testCancelDelegationTokenRequestPBImpl(){
  Token token=getDelegationToken();
  CancelDelegationTokenRequestPBImpl request=new CancelDelegationTokenRequestPBImpl();
  request.setDelegationToken(token);
  CancelDelegationTokenRequestProto serialized=request.getProto();
  CancelDelegationTokenRequestPBImpl restored=new CancelDelegationTokenRequestPBImpl(serialized);
  assertNotNull(restored.getDelegationToken());
  assertEquals(token,restored.getDelegationToken());
}
InternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * RenewDelegationTokenRequestPBImpl must survive a round trip through its
 * protobuf representation: the delegation token set on the original is
 * present and equal on the reconstructed copy.
 */
@Test public void testRenewDelegationTokenRequestPBImpl(){
  Token token=getDelegationToken();
  RenewDelegationTokenRequestPBImpl request=new RenewDelegationTokenRequestPBImpl();
  request.setDelegationToken(token);
  RenewDelegationTokenRequestProto serialized=request.getProto();
  RenewDelegationTokenRequestPBImpl restored=new RenewDelegationTokenRequestPBImpl(serialized);
  assertNotNull(restored.getDelegationToken());
  assertEquals(token,restored.getDelegationToken());
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * Simple test Resource request.
 * Test hashCode, equals and compare: two identically-built requests agree on
 * all three; diverging one field (numContainers) must be observed by all three.
 */
@Test public void testResourceRequest(){
Resource resource=recordFactory.newRecordInstance(Resource.class);
Priority priority=recordFactory.newRecordInstance(Priority.class);
ResourceRequest original=ResourceRequest.newInstance(priority,"localhost",resource,2);
ResourceRequest copy=ResourceRequest.newInstance(priority,"localhost",resource,2);
// Identical requests: equal, ordered the same, identical hash.
assertTrue(original.equals(copy));
assertEquals(0,original.compareTo(copy));
assertTrue(original.hashCode() == copy.hashCode());
// Diverge one field; equals/compareTo/hashCode must all notice.
copy.setNumContainers(1);
assertFalse(original.equals(copy));
// Previously assertNotSame(0, original.compareTo(copy)): that autoboxes both
// sides and compares object identity, passing only thanks to the Integer
// cache. Assert the value inequality directly instead.
assertTrue(original.compareTo(copy) != 0);
assertFalse(original.hashCode() == copy.hashCode());
}
APIUtilityVerifierInternalCallVerifierIdentityVerifierEqualityVerifierHybridVerifier
/**
 * When both RM_WEBAPP_ADDRESS and RM_ADDRESS are configured, the resolved RM
 * web URL must use the webapp port and must not fall back to the RM address
 * host.
 */
@Test public void testRMWebUrlSpecified() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"fortesting:24543");
conf.set(YarnConfiguration.RM_ADDRESS,"rmtesting:9999");
String rmWebUrl=WebAppUtils.getRMWebAppURLWithScheme(conf);
String[] parts=rmWebUrl.split(":");
// parseInt replaces Integer.valueOf(...).intValue(); message typo fixed.
Assert.assertEquals("RM Web URL Port is incorrect",24543,Integer.parseInt(parts[parts.length - 1]));
// assertNotSame on Strings only compared references; compare values instead.
Assert.assertFalse("RM Web Url not resolved correctly. Should not be rmtesting",rmWebUrl.equals("http://rmtesting:24543"));
}
InternalCallVerifierEqualityVerifier
/**
 * Make a local and log directory inaccessible during initialization
 * and verify those bad directories are recognized and removed from
 * the list of available local and log directories.
 * @throws IOException
 */
@Test public void testDirFailuresOnStartup() throws IOException {
Configuration conf=new YarnConfiguration();
// Two candidate dirs of each kind; one of each is sabotaged below.
String localDir1=new File(testDir,"localDir1").getPath();
String localDir2=new File(testDir,"localDir2").getPath();
String logDir1=new File(testDir,"logDir1").getPath();
String logDir2=new File(testDir,"logDir2").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir1 + "," + localDir2);
conf.set(YarnConfiguration.NM_LOG_DIRS,logDir1 + "," + logDir2);
// Sabotage one local dir and one log dir before service init.
prepareDirToFail(localDir1);
prepareDirToFail(logDir2);
LocalDirsHandlerService dirSvc=new LocalDirsHandlerService();
dirSvc.init(conf);
// Only the healthy dir of each kind should survive initialization.
List localDirs=dirSvc.getLocalDirs();
Assert.assertEquals(1,localDirs.size());
Assert.assertEquals(new Path(localDir2).toString(),localDirs.get(0));
List logDirs=dirSvc.getLogDirs();
Assert.assertEquals(1,logDirs.size());
Assert.assertEquals(new Path(logDir1).toString(),logDirs.get(0));
}
InternalCallVerifierEqualityVerifier
/**
 * The application-history REST endpoint for app attempts must return a JSON
 * document with a single "appAttempts" object containing an "appAttempt"
 * array of all 5 attempts seeded by the test fixture.
 */
@Test public void testMultipleAttempts() throws Exception {
ApplicationId appId=ApplicationId.newInstance(0,1);
WebResource r=resource();
// GET /ws/v1/applicationhistory/apps/{appid}/appattempts as JSON.
ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject appAttempts=json.getJSONObject("appAttempts");
assertEquals("incorrect number of elements",1,appAttempts.length());
JSONArray array=appAttempts.getJSONArray("appAttempt");
// 5 attempts presumably seeded by the test setup -- see fixture
assertEquals("incorrect number of elements",5,array.length());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The application-history REST endpoint for an attempt's containers must
 * return a JSON document with a single "containers" object containing a
 * "container" array of all 5 containers seeded by the test fixture.
 */
@Test public void testMultipleContainers() throws Exception {
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
WebResource r=resource();
// GET /ws/v1/applicationhistory/apps/{appid}/appattempts/{attemptid}/containers as JSON.
ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(appAttemptId.toString()).path("containers").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject containers=json.getJSONObject("containers");
assertEquals("incorrect number of elements",1,containers.length());
JSONArray array=containers.getJSONArray("container");
// 5 containers presumably seeded by the test setup -- see fixture
assertEquals("incorrect number of elements",5,array.length());
}
InternalCallVerifierEqualityVerifier
/**
 * Querying the application-history apps endpoint filtered by state=FINISHED
 * must return a JSON "apps" object whose "app" array holds the 5 finished
 * applications seeded by the test fixture.
 */
@Test public void testAppsQuery() throws Exception {
WebResource r=resource();
// GET /ws/v1/applicationhistory/apps?state=FINISHED as JSON.
ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").queryParam("state",YarnApplicationState.FINISHED.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
// 5 finished apps presumably seeded by the test setup -- see fixture
assertEquals("incorrect number of elements",5,array.length());
}
APIUtilityVerifierBranchVerifierInternalCallVerifierEqualityVerifier
/**
 * getRunCommand must honor NM_CONTAINER_EXECUTOR_SCHED_PRIORITY for both a
 * positive and a negative priority: on Windows the platform run command
 * leads; elsewhere the command is prefixed with "nice -n &lt;priority&gt;".
 */
@Test(timeout=5000) public void testRunCommandwithPriority() throws Exception {
Configuration conf=new Configuration();
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,2);
assertRunCommandPriority(ContainerExecutor.getRunCommand("echo","group1",conf),2);
// Negative priorities must be passed through unchanged as well.
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,-5);
assertRunCommandPriority(ContainerExecutor.getRunCommand("echo","group1",conf),-5);
}
/**
 * Verifies the platform-specific run-command prefix for the given scheduling
 * priority (factored out of the previously duplicated if/else blocks).
 */
private void assertRunCommandPriority(String[] command,int priority){
if (Shell.WINDOWS) {
assertEquals("first command should be the run command for the platform",Shell.WINUTILS,command[0]);
}
else {
assertEquals("first command should be nice","nice",command[0]);
assertEquals("second command should be -n","-n",command[1]);
assertEquals("third command should be the priority",Integer.toString(priority),command[2]);
}
}
EqualityVerifier
/**
 * With a configured scheduler priority, the Linux container executor must
 * prepend "nice -n 2" to its launch command, and a subsequent container
 * launch (via the mock executor script) must still succeed.
 */
@Test(timeout=5000) public void testContainerLaunchWithPriority() throws IOException {
// The mock executor script must be executable before it can be invoked.
File f=new File("./src/test/resources/mock-container-executor");
if (!FileUtil.canExecute(f)) {
FileUtil.setExecutable(f,true);
}
String executorPath=f.getAbsolutePath();
Configuration conf=new Configuration();
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,executorPath);
conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,2);
mockExec.setConf(conf);
List command=new ArrayList();
mockExec.addSchedPriorityCommand(command);
assertEquals("first should be nice","nice",command.get(0));
assertEquals("second should be -n","-n",command.get(1));
assertEquals("third should be the priority",Integer.toString(2),command.get(2));
// Reuse the standard launch test with the priority-augmented config.
testContainerLaunch();
}
EqualityVerifier
/**
 * With no scheduler priority configured, addSchedPriorityCommand must leave
 * the command list untouched.
 */
@Test(timeout=5000) public void testLaunchCommandWithoutPriority() throws IOException {
  List launchCommand=new ArrayList();
  mockExec.addSchedPriorityCommand(launchCommand);
  assertEquals("addSchedPriority should be empty",0,launchCommand.size());
}
BranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * AuxServices must instantiate each configured auxiliary service and drive
 * all of them through the INITED -> STARTED -> STOPPED lifecycle together.
 */
@Test public void testAuxServices(){
Configuration conf=new Configuration();
// Register two aux services, Asrv and Bsrv, via configuration.
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
final AuxServices aux=new AuxServices();
aux.init(conf);
// Prime-product latch: *2 for ServiceA, *3 for ServiceB, so 6 means
// exactly one of each was created (order-independent).
int latch=1;
for ( Service s : aux.getServices()) {
assertEquals(INITED,s.getServiceState());
if (s instanceof ServiceA) {
latch*=2;
}
 else if (s instanceof ServiceB) {
latch*=3;
}
 else fail("Unexpected service type " + s.getClass());
}
assertEquals("Invalid mix of services",6,latch);
// Starting/stopping the composite must start/stop every child service.
aux.start();
for ( Service s : aux.getServices()) {
assertEquals(STARTED,s.getServiceState());
}
aux.stop();
for ( Service s : aux.getServices()) {
assertEquals(STOPPED,s.getServiceState());
}
}
BranchVerifierUtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Like testAuxServices, but additionally checks that after start each
 * auxiliary service exposes its metadata ("A"/"B" payloads) via getMetaData.
 */
@Test public void testAuxServicesMeta(){
Configuration conf=new Configuration();
conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
final AuxServices aux=new AuxServices();
aux.init(conf);
// Prime-product latch: *2 for ServiceA, *3 for ServiceB, so 6 means
// exactly one of each was created (order-independent).
int latch=1;
for ( Service s : aux.getServices()) {
assertEquals(INITED,s.getServiceState());
if (s instanceof ServiceA) {
latch*=2;
}
 else if (s instanceof ServiceB) {
latch*=3;
}
 else fail("Unexpected service type " + s.getClass());
}
assertEquals("Invalid mix of services",6,latch);
aux.start();
for ( Service s : aux.getServices()) {
assertEquals(STARTED,s.getServiceState());
}
// Metadata is keyed by service name; payloads are ByteBuffers.
Map meta=aux.getMetaData();
assertEquals(2,meta.size());
assertEquals("A",new String(meta.get("Asrv").array()));
assertEquals("B",new String(meta.get("Bsrv").array()));
aux.stop();
for ( Service s : aux.getServices()) {
assertEquals(STOPPED,s.getServiceState());
}
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Exercises LocalCacheDirectoryManager's per-directory file accounting:
 * incrementing/decrementing file counts for a path changes which relative
 * directory getRelativePathForLocalization hands out next. The manager is
 * stateful, so the assertion sequence below depends on exact call order.
 */
@Test public void testIncrementFileCountForPath(){
YarnConfiguration conf=new YarnConfiguration();
// Small per-directory capacity so the root fills quickly.
conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2);
LocalCacheDirectoryManager mgr=new LocalCacheDirectoryManager(conf);
final String rootPath="";
mgr.incrementFileCountForPath(rootPath);
// Root still has room after one increment.
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
// NOTE(review): this asserts the opposite of the line above against a
// fresh call — each getRelativePathForLocalization() also consumes a slot,
// so by now the root is full and a sub-directory is returned. Confirm
// against LocalCacheDirectoryManager's accounting.
Assert.assertFalse("root dir should be full",rootPath.equals(mgr.getRelativePathForLocalization()));
mgr.getRelativePathForLocalization();
// Freeing two slots in the root makes it preferred again for two calls.
mgr.decrementFileCountForPath(rootPath);
mgr.decrementFileCountForPath(rootPath);
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
String otherDir=mgr.getRelativePathForLocalization();
Assert.assertFalse("root dir should be full",otherDir.equals(rootPath));
final String deepDir0="d/e/e/p/0";
final String deepDir1="d/e/e/p/1";
final String deepDir2="d/e/e/p/2";
final String deepDir3="d/e/e/p/3";
// Incrementing a deep path teaches the manager about that subtree and
// shifts which directories are handed out next.
mgr.incrementFileCountForPath(deepDir0);
Assert.assertEquals(otherDir,mgr.getRelativePathForLocalization());
Assert.assertEquals(deepDir0,mgr.getRelativePathForLocalization());
Assert.assertEquals("total dir count incorrect after increment",deepDir1,mgr.getRelativePathForLocalization());
mgr.incrementFileCountForPath(deepDir2);
mgr.incrementFileCountForPath(deepDir1);
mgr.incrementFileCountForPath(deepDir2);
Assert.assertEquals(deepDir3,mgr.getRelativePathForLocalization());
}
InternalCallVerifierEqualityVerifier
/**
 * finishApplication(null) must be a safe no-op: the completed-apps list stays
 * empty and no app is retired.
 */
@Test public void testRMAppRetireNullApp() throws Exception {
long now=System.currentTimeMillis();
// 10 mocked apps, all "finished" 20s ago.
RMContext rmContext=mockRMContext(10,now - 20000);
TestRMAppManager appMonitor=new TestRMAppManager(rmContext,new Configuration());
Assert.assertEquals("Number of apps incorrect before",10,rmContext.getRMApps().size());
appMonitor.finishApplication(null);
Assert.assertEquals("Number of completed apps incorrect after check",0,appMonitor.getCompletedAppsListSize());
}
APIUtilityVerifierUtilityVerifierInternalCallVerifierBooleanVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * Submitting an application with an ApplicationId that is already registered
 * must throw a YarnException and leave the existing app untouched.
 */
@Test(timeout=30000) public void testRMAppSubmitDuplicateApplicationId() throws Exception {
ApplicationId appId=MockApps.newAppID(0);
asContext.setApplicationId(appId);
RMApp appOrig=rmContext.getRMApps().get(appId);
// Reference inequality is intentional here: the pre-existing app must not
// be the one this test would submit.
Assert.assertTrue("app name matches but shouldn't","testApp1" != appOrig.getName());
try {
appMonitor.submitApplication(asContext,"test");
Assert.fail("Exception is expected when applicationId is duplicate.");
}
catch ( YarnException e) {
Assert.assertTrue("The thrown exception is not the expectd one.",e.getMessage().contains("Cannot add a duplicate!"));
}
// The original app must remain registered and unchanged in state.
RMApp app=rmContext.getRMApps().get(appId);
Assert.assertNotNull("app is null",app);
Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
Assert.assertEquals("app state doesn't match",RMAppState.FINISHED,app.getState());
}
InternalCallVerifierEqualityVerifier
/**
 * With both completed-app retention limits set to zero, the completed-apps
 * check must retire every app from memory and the state store.
 */
@Test public void testRMAppRetireZeroSetting() throws Exception {
long now=System.currentTimeMillis();
RMContext rmContext=mockRMContext(10,now - 20000);
Configuration conf=new YarnConfiguration();
// Keep nothing: zero retained in the state store and in memory.
conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,0);
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,0);
TestRMAppManager appMonitor=new TestRMAppManager(rmContext,conf);
Assert.assertEquals("Number of apps incorrect before",10,rmContext.getRMApps().size());
addToCompletedApps(appMonitor,rmContext);
Assert.assertEquals("Number of completed apps incorrect",10,appMonitor.getCompletedAppsListSize());
appMonitor.checkAppNumCompletedLimit();
Assert.assertEquals("Number of apps incorrect after # completed check",0,rmContext.getRMApps().size());
Assert.assertEquals("Number of completed apps incorrect after check",0,appMonitor.getCompletedAppsListSize());
// All 10 apps must also have been removed from the state store.
verify(rmContext.getStateStore(),times(10)).removeApplication(isA(RMApp.class));
}
InternalCallVerifierEqualityVerifier
/**
 * When the number of completed apps equals the retention limit, the
 * completed-apps check must retire nothing.
 */
@Test public void testRMAppRetireNone() throws Exception {
long now=System.currentTimeMillis();
RMContext rmContext=mockRMContext(10,now - 10);
Configuration conf=new YarnConfiguration();
// Limit (10) exactly matches the number of completed apps.
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,10);
TestRMAppManager appMonitor=new TestRMAppManager(rmContext,conf);
Assert.assertEquals("Number of apps incorrect before checkAppTimeLimit",10,rmContext.getRMApps().size());
addToCompletedApps(appMonitor,rmContext);
appMonitor.checkAppNumCompletedLimit();
Assert.assertEquals("Number of apps incorrect after # completed check",10,rmContext.getRMApps().size());
Assert.assertEquals("Number of completed apps incorrect after check",10,appMonitor.getCompletedAppsListSize());
// Nothing should have been purged from the state store either.
verify(rmContext.getStateStore(),never()).removeApplication(isA(RMApp.class));
}
InternalCallVerifierEqualityVerifier
/**
 * When the state-store retention limit exceeds the in-memory limit, the
 * in-memory limit governs: apps beyond it are dropped from memory, and the
 * state store keeps the same (memory-limited) number.
 */
@Test public void testStateStoreAppLimitLargerThanMemoryAppLimit(){
long now=System.currentTimeMillis();
RMContext rmContext=mockRMContext(10,now - 20000);
Configuration conf=new YarnConfiguration();
int maxAppsInMemory=8;
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,maxAppsInMemory);
// State-store limit (1000) is far larger, so it never binds here.
conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,1000);
TestRMAppManager appMonitor=new TestRMAppManager(rmContext,conf);
addToCompletedApps(appMonitor,rmContext);
Assert.assertEquals("Number of completed apps incorrect",10,appMonitor.getCompletedAppsListSize());
appMonitor.checkAppNumCompletedLimit();
// 10 completed minus 8 retained => 2 removals expected.
int numRemoveApps=10 - maxAppsInMemory;
Assert.assertEquals("Number of apps incorrect after # completed check",maxAppsInMemory,rmContext.getRMApps().size());
Assert.assertEquals("Number of completed apps incorrect after check",maxAppsInMemory,appMonitor.getCompletedAppsListSize());
verify(rmContext.getStateStore(),times(numRemoveApps)).removeApplication(isA(RMApp.class));
Assert.assertEquals(maxAppsInMemory,appMonitor.getCompletedAppsInStateStore());
}
InternalCallVerifierEqualityVerifier
/**
 * When the state-store retention limit is smaller than the in-memory limit,
 * memory keeps its own quota while the state store is trimmed further down
 * to its stricter limit.
 */
@Test public void testStateStoreAppLimitLessThanMemoryAppLimit(){
long now=System.currentTimeMillis();
RMContext rmContext=mockRMContext(10,now - 20000);
Configuration conf=new YarnConfiguration();
int maxAppsInMemory=8;
int maxAppsInStateStore=4;
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,maxAppsInMemory);
conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,maxAppsInStateStore);
TestRMAppManager appMonitor=new TestRMAppManager(rmContext,conf);
addToCompletedApps(appMonitor,rmContext);
Assert.assertEquals("Number of completed apps incorrect",10,appMonitor.getCompletedAppsListSize());
appMonitor.checkAppNumCompletedLimit();
// Memory retains its quota of 8...
Assert.assertEquals("Number of apps incorrect after # completed check",maxAppsInMemory,rmContext.getRMApps().size());
Assert.assertEquals("Number of completed apps incorrect after check",maxAppsInMemory,appMonitor.getCompletedAppsListSize());
// ...but the state store is trimmed down to 4, i.e. 6 removals.
int numRemoveAppsFromStateStore=10 - maxAppsInStateStore;
verify(rmContext.getStateStore(),times(numRemoveAppsFromStateStore)).removeApplication(isA(RMApp.class));
Assert.assertEquals(maxAppsInStateStore,appMonitor.getCompletedAppsInStateStore());
}
APIUtilityVerifierInternalCallVerifierNullVerifierEqualityVerifierHybridVerifier
/**
 * A fresh application submission must register the app in NEW state and
 * eventually dispatch a START event for it.
 */
@Test public void testRMAppSubmit() throws Exception {
appMonitor.submitApplication(asContext,"test");
RMApp app=rmContext.getRMApps().get(appId);
Assert.assertNotNull("app is null",app);
Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
Assert.assertEquals("app state doesn't match",RMAppState.NEW,app.getState());
int timeoutSecs=0;
// Poll up to ~20s for the event to change away from KILL — presumably
// KILL is the fixture's sentinel/initial event type; verify in setup.
while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
Thread.sleep(1000);
}
Assert.assertEquals("app event type sent is wrong",RMAppEventType.START,getAppEventType());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * forceKillApplication semantics: killing a managed-AM app takes multiple
 * attempts before completing, while killing an unmanaged-AM app acknowledges
 * completion immediately; both apps must end up in KILLED state.
 */
@Test public void testForceKillApplication() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
MockRM rm=new MockRM();
rm.init(conf);
rm.start();
ClientRMService rmService=rm.getClientRMService();
// Query used throughout to count apps that have reached KILLED state.
GetApplicationsRequest getRequest=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.KILLED));
RMApp app1=rm.submitApp(1024);
// Second arg presumably marks the app as using an unmanaged AM -- see MockRM.
RMApp app2=rm.submitApp(1024,true);
assertEquals("Incorrect number of apps in the RM",0,rmService.getApplications(getRequest).getApplicationList().size());
KillApplicationRequest killRequest1=KillApplicationRequest.newInstance(app1.getApplicationId());
KillApplicationRequest killRequest2=KillApplicationRequest.newInstance(app2.getApplicationId());
// Poll the kill until it reports completion (bounded retries).
int killAttemptCount=0;
for (int i=0; i < 100; i++) {
KillApplicationResponse killResponse1=rmService.forceKillApplication(killRequest1);
killAttemptCount++;
if (killResponse1.getIsKillCompleted()) {
break;
}
Thread.sleep(10);
}
assertTrue("Kill attempt count should be greater than 1 for managed AMs",killAttemptCount > 1);
assertEquals("Incorrect number of apps in the RM",1,rmService.getApplications(getRequest).getApplicationList().size());
// Unmanaged AM: kill is acknowledged as complete on the first call.
KillApplicationResponse killResponse2=rmService.forceKillApplication(killRequest2);
assertTrue("Killing UnmanagedAM should falsely acknowledge true",killResponse2.getIsKillCompleted());
// Wait (bounded) for both apps to be reported KILLED.
for (int i=0; i < 100; i++) {
if (2 == rmService.getApplications(getRequest).getApplicationList().size()) {
break;
}
Thread.sleep(10);
}
assertEquals("Incorrect number of apps in the RM",2,rmService.getApplications(getRequest).getApplicationList().size());
}
InternalCallVerifierEqualityVerifier
/**
 * End-to-end filter coverage for ClientRMService.getApplications: submits
 * three apps across two queues with nested tag sets, then exercises the
 * limit, start-range, queue, user, tag and scope filters in turn.
 * Counts of 6 vs 3 below suggest the mocked scheduler pre-seeds 3 more
 * apps owned by another user -- TODO confirm against mockYarnScheduler().
 */
@Test public void testGetApplications() throws IOException, YarnException {
YarnScheduler yarnScheduler=mockYarnScheduler();
RMContext rmContext=mock(RMContext.class);
mockRMContext(yarnScheduler,rmContext);
RMStateStore stateStore=mock(RMStateStore.class);
when(rmContext.getStateStore()).thenReturn(stateStore);
RMAppManager appManager=new RMAppManager(rmContext,yarnScheduler,null,mock(ApplicationACLsManager.class),new Configuration());
// Swallow dispatched events; this test only inspects the service's view.
when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){
public void handle( Event event){
}
}
);
ApplicationACLsManager mockAclsManager=mock(ApplicationACLsManager.class);
QueueACLsManager mockQueueACLsManager=mock(QueueACLsManager.class);
when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),any(QueueACL.class),anyString())).thenReturn(true);
ClientRMService rmService=new ClientRMService(rmContext,yarnScheduler,appManager,mockAclsManager,mockQueueACLsManager,null);
String[] queues={QUEUE_1,QUEUE_2};
String[] appNames={MockApps.newAppName(),MockApps.newAppName(),MockApps.newAppName()};
ApplicationId[] appIds={getApplicationId(101),getApplicationId(102),getApplicationId(103)};
List tags=Arrays.asList("Tag1","Tag2","Tag3");
long[] submitTimeMillis=new long[3];
// Submit 3 apps: app i goes to queues[i%2] and carries tags[0..i].
for (int i=0; i < appIds.length; i++) {
ApplicationId appId=appIds[i];
when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),ApplicationAccessType.VIEW_APP,null,appId)).thenReturn(true);
SubmitApplicationRequest submitRequest=mockSubmitAppRequest(appId,appNames[i],queues[i % queues.length],new HashSet(tags.subList(0,i + 1)));
rmService.submitApplication(submitRequest);
submitTimeMillis[i]=System.currentTimeMillis();
}
// No filters: all 6 visible apps.
GetApplicationsRequest request=GetApplicationsRequest.newInstance();
assertEquals("Incorrect total number of apps",6,rmService.getApplications(request).getApplicationList().size());
request.setLimit(1L);
assertEquals("Failed to limit applications",1,rmService.getApplications(request).getApplicationList().size());
// Start-range filter: each later lower bound excludes one more submission.
request=GetApplicationsRequest.newInstance();
request.setStartRange(submitTimeMillis[0],System.currentTimeMillis());
assertEquals("Incorrect number of matching start range",2,rmService.getApplications(request).getApplicationList().size());
request.setStartRange(submitTimeMillis[1],System.currentTimeMillis());
assertEquals("Incorrect number of matching start range",1,rmService.getApplications(request).getApplicationList().size());
request.setStartRange(submitTimeMillis[2],System.currentTimeMillis());
assertEquals("Incorrect number of matching start range",0,rmService.getApplications(request).getApplicationList().size());
// Queue filter: the live set is consulted, so mutating queueSet after
// setQueues still affects subsequent calls.
request=GetApplicationsRequest.newInstance();
Set queueSet=new HashSet();
request.setQueues(queueSet);
queueSet.add(queues[0]);
assertEquals("Incorrect number of applications in queue",2,rmService.getApplications(request).getApplicationList().size());
assertEquals("Incorrect number of applications in queue",2,rmService.getApplications(request,false).getApplicationList().size());
queueSet.add(queues[1]);
assertEquals("Incorrect number of applications in queue",3,rmService.getApplications(request).getApplicationList().size());
// User filter.
request=GetApplicationsRequest.newInstance();
Set userSet=new HashSet();
request.setUsers(userSet);
userSet.add("random-user-name");
assertEquals("Incorrect number of applications for user",0,rmService.getApplications(request).getApplicationList().size());
userSet.add(UserGroupInformation.getCurrentUser().getShortUserName());
assertEquals("Incorrect number of applications for user",3,rmService.getApplications(request).getApplicationList().size());
// Tag filter: tags are nested, so Tag1 matches 3 apps, Tag2 2, Tag3 1.
request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.ALL,null,null,null,null,null,null,null,null);
Set tagSet=new HashSet();
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",6,rmService.getApplications(request).getApplicationList().size());
tagSet=Sets.newHashSet(tags.get(0));
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",3,rmService.getApplications(request).getApplicationList().size());
tagSet=Sets.newHashSet(tags.get(1));
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",2,rmService.getApplications(request).getApplicationList().size());
tagSet=Sets.newHashSet(tags.get(2));
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",1,rmService.getApplications(request).getApplicationList().size());
// Scope filter: VIEWABLE sees all 6; OWN sees only the current user's 3.
request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.VIEWABLE);
assertEquals("Incorrect number of applications for the scope",6,rmService.getApplications(request).getApplicationList().size());
request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.OWN);
assertEquals("Incorrect number of applications for the scope",3,rmService.getApplications(request).getApplicationList().size());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies that repeated HA transitions (standby/active cycles) neither leak
 * nor drop event handlers or services: the dispatcher handler count and the
 * RM service count stay constant, and the dispatcher observed while standby
 * is stopped once the RM transitions again.
 */
@Test public void testRMDispatcherForHA() throws IOException {
String errorMessageForEventHandler="Expect to get the same number of handlers";
String errorMessageForService="Expect to get the same number of services";
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
// Install a counting dispatcher so handler registrations can be tallied.
rm=new MockRM(conf){
@Override protected Dispatcher createDispatcher(){
return new MyCountingDispatcher();
}
};
rm.init(conf);
int expectedEventHandlerCount=((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount();
int expectedServiceCount=rm.getServices().size();
assertTrue(expectedEventHandlerCount != 0);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start() the RM must report INITIALIZING and not be ready for active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
rm.start();
// Cycle through several standby/active transitions.
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
MyCountingDispatcher dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
assertFalse(dispatcher.isStopped());
rm.adminService.transitionToActive(requestInfo);
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
// Capture the currently-active dispatcher; after going standby it must stop.
dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
rm.adminService.transitionToStandby(requestInfo);
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
assertTrue(dispatcher.isStopped());
rm.stop();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Test to verify the following RM HA transitions to the following states.
 * 1. Standby: Should be a no-op
 * 2. Active: Active services should start
 * 3. Active: Should be a no-op.
 * While active, submit a couple of jobs
 * 4. Standby: Active services should stop
 * 5. Active: Active services should start
 * 6. Stop the RM: All services should stop and RM should not be ready to
 * become Active
 */
@Test(timeout=30000) public void testFailoverAndTransitions() throws Exception {
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
rm=new MockRM(conf);
rm.init(conf);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start(): INITIALIZING and not ready to become active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
checkMonitorHealth();
rm.start();
checkMonitorHealth();
checkStandbyRMFunctionality();
// Standby: no nodes/apps registered, all metrics zero.
verifyClusterMetrics(0,0,0,0,0,0);
// (1) Standby -> Standby: no-op, metrics stay zero.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// (2) Standby -> Active: active services start; checkActiveRMFunctionality
// submits work, which is what the non-zero metrics below reflect.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// (3) Active -> Active: no-op transition, but the functionality check
// submits again, so the cumulative counters advance.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,2,2,2,2048,2);
// (4) Active -> Standby: active services stop and metrics reset.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// (5) Standby -> Active again: services restart from a clean slate.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// (6) Stop the RM: STOPPING state, not ready for active, no active services.
rm.stop();
assertEquals(STATE_ERR,HAServiceState.STOPPING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active even after it is stopped",rm.adminService.getServiceStatus().isReadyToBecomeActive());
assertFalse("Active RM services are started",rm.areActiveServicesRunning());
checkMonitorHealth();
}
InternalCallVerifierEqualityVerifier
/**
 * An EXPIRE event on a RUNNING node must move the node to LOST and shift
 * the cluster metrics accordingly (one fewer active, one more lost).
 */
@Test public void testRunningExpire(){
RMNodeImpl runningNode=getRunningNode();
ClusterMetrics metrics=ClusterMetrics.getMetrics();
// Snapshot every counter before firing the event.
int activeBefore=metrics.getNumActiveNMs();
int lostBefore=metrics.getNumLostNMs();
int unhealthyBefore=metrics.getUnhealthyNMs();
int decommissionedBefore=metrics.getNumDecommisionedNMs();
int rebootedBefore=metrics.getNumRebootedNMs();
runningNode.handle(new RMNodeEvent(runningNode.getNodeID(),RMNodeEventType.EXPIRE));
// Only the active/lost counters change; everything else is untouched.
Assert.assertEquals("Active Nodes",activeBefore - 1,metrics.getNumActiveNMs());
Assert.assertEquals("Lost Nodes",lostBefore + 1,metrics.getNumLostNMs());
Assert.assertEquals("Unhealthy Nodes",unhealthyBefore,metrics.getUnhealthyNMs());
Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
Assert.assertEquals(NodeState.LOST,runningNode.getState());
}
InternalCallVerifierEqualityVerifier
/**
 * A REBOOTING event on an UNHEALTHY node must move the node to REBOOTED:
 * one fewer unhealthy, one more rebooted; other counters unchanged.
 */
@Test public void testUnhealthyRebooting(){
RMNodeImpl unhealthyNode=getUnhealthyNode();
ClusterMetrics metrics=ClusterMetrics.getMetrics();
// Snapshot every counter before firing the event.
int activeBefore=metrics.getNumActiveNMs();
int lostBefore=metrics.getNumLostNMs();
int unhealthyBefore=metrics.getUnhealthyNMs();
int decommissionedBefore=metrics.getNumDecommisionedNMs();
int rebootedBefore=metrics.getNumRebootedNMs();
unhealthyNode.handle(new RMNodeEvent(unhealthyNode.getNodeID(),RMNodeEventType.REBOOTING));
Assert.assertEquals("Active Nodes",activeBefore,metrics.getNumActiveNMs());
Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
Assert.assertEquals("Unhealthy Nodes",unhealthyBefore - 1,metrics.getUnhealthyNMs());
Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
Assert.assertEquals("Rebooted Nodes",rebootedBefore + 1,metrics.getNumRebootedNMs());
Assert.assertEquals(NodeState.REBOOTED,unhealthyNode.getState());
}
InternalCallVerifierEqualityVerifier
/**
 * A DECOMMISSION event on an UNHEALTHY node must move the node to
 * DECOMMISSIONED: one fewer unhealthy, one more decommissioned.
 */
@Test public void testUnhealthyDecommission(){
RMNodeImpl unhealthyNode=getUnhealthyNode();
ClusterMetrics metrics=ClusterMetrics.getMetrics();
// Snapshot every counter before firing the event.
int activeBefore=metrics.getNumActiveNMs();
int lostBefore=metrics.getNumLostNMs();
int unhealthyBefore=metrics.getUnhealthyNMs();
int decommissionedBefore=metrics.getNumDecommisionedNMs();
int rebootedBefore=metrics.getNumRebootedNMs();
unhealthyNode.handle(new RMNodeEvent(unhealthyNode.getNodeID(),RMNodeEventType.DECOMMISSION));
Assert.assertEquals("Active Nodes",activeBefore,metrics.getNumActiveNMs());
Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
Assert.assertEquals("Unhealthy Nodes",unhealthyBefore - 1,metrics.getUnhealthyNMs());
Assert.assertEquals("Decommissioned Nodes",decommissionedBefore + 1,metrics.getNumDecommisionedNMs());
Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
Assert.assertEquals(NodeState.DECOMMISSIONED,unhealthyNode.getState());
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * A reconnecting node manager reporting a different software version must
 * overwrite the version recorded on the existing RMNode.
 */
@Test public void testReconnnectUpdate(){
final String initialVersion="nm version 1";
final String reportedVersion="nm version 2";
RMNodeImpl existingNode=getRunningNode(initialVersion);
Assert.assertEquals(initialVersion,existingNode.getNodeManagerVersion());
// Simulate the same node re-registering with an upgraded NM.
RMNodeImpl reconnectingNode=getRunningNode(reportedVersion);
existingNode.handle(new RMNodeReconnectEvent(existingNode.getNodeID(),reconnectingNode,null));
Assert.assertEquals(reportedVersion,existingNode.getNodeManagerVersion());
}
InternalCallVerifierEqualityVerifier
/**
 * A node absent from the include-hosts file must be rejected at
 * registration with a SHUTDOWN action and an explanatory diagnostic.
 */
@Test public void testNodeRegistrationFailure() throws Exception {
// Only host1 is whitelisted; host2 below must be refused.
writeToHostsFile("host1");
Configuration conf=new Configuration();
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
rm=new MockRM(conf);
rm.start();
RegisterNodeManagerRequest registration=Records.newRecord(RegisterNodeManagerRequest.class);
registration.setNodeId(NodeId.newInstance("host2",1234));
registration.setHttpPort(1234);
RegisterNodeManagerResponse registrationResponse=rm.getResourceTrackerService().registerNodeManager(registration);
Assert.assertEquals(NodeAction.SHUTDOWN,registrationResponse.getNodeAction());
Assert.assertEquals("Disallowed NodeManager from host2, Sending SHUTDOWN signal to the NodeManager.",registrationResponse.getDiagnosticsMessage());
}
InternalCallVerifierEqualityVerifier
/**
 * Decommissioning using a post-configured include hosts file: both nodes
 * start NORMAL; after an include file listing only host1 is installed and
 * nodes are refreshed, host2 must be told to SHUTDOWN and the
 * decommissioned-node metric must rise by one.
 */
@Test public void testAddNewIncludePathToConfiguration() throws Exception {
Configuration conf=new Configuration();
rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",10240);
ClusterMetrics metrics=ClusterMetrics.getMetrics();
// Fixed: was a Java-language `assert`, which is a no-op unless the JVM
// runs with -ea; use a JUnit assertion so the check always executes.
Assert.assertNotNull(metrics);
int initialMetricCount=metrics.getNumDecommisionedNMs();
// With no include file configured, both nodes heartbeat normally.
NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
// Install an include list containing only host1, then refresh.
writeToHostsFile("host1");
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
rm.getNodesListManager().refreshNodes(conf);
nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertEquals("Node should not have been decomissioned.",NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertEquals("Node should have been decomissioned but is in state" + nodeHeartbeat.getNodeAction(),NodeAction.SHUTDOWN,nodeHeartbeat.getNodeAction());
checkDecommissionedNMCount(rm,++initialMetricCount);
}
InternalCallVerifierEqualityVerifier
/**
 * Decommissioning using a post-configured exclude hosts file: both nodes
 * start NORMAL; after an exclude file listing host2 is installed and nodes
 * are refreshed, host2 must be told to SHUTDOWN and the
 * decommissioned-node metric must rise by one.
 */
@Test public void testAddNewExcludePathToConfiguration() throws Exception {
Configuration conf=new Configuration();
rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",10240);
ClusterMetrics metrics=ClusterMetrics.getMetrics();
// Fixed: was a Java-language `assert`, which is a no-op unless the JVM
// runs with -ea; use a JUnit assertion so the check always executes.
Assert.assertNotNull(metrics);
int initialMetricCount=metrics.getNumDecommisionedNMs();
// With no exclude file configured, both nodes heartbeat normally.
NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
// Install an exclude list containing host2, then refresh.
writeToHostsFile("host2");
conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,hostFile.getAbsolutePath());
rm.getNodesListManager().refreshNodes(conf);
nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertEquals("Node should not have been decomissioned.",NodeAction.NORMAL,nodeHeartbeat.getNodeAction());
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertEquals("Node should have been decomissioned but is in state" + nodeHeartbeat.getNodeAction(),NodeAction.SHUTDOWN,nodeHeartbeat.getNodeAction());
checkDecommissionedNMCount(rm,++initialMetricCount);
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * A node manager whose version is below the configured minimum
 * ("EqualToRM" here) must be refused with SHUTDOWN and a diagnostic that
 * names the offending version.
 */
@Test public void testNodeRegistrationVersionLessThanRM() throws Exception {
writeToHostsFile("host2");
Configuration conf=new Configuration();
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
// Require the NM version to be at least the RM's own version.
conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION,"EqualToRM");
rm=new MockRM(conf);
rm.start();
String nmVersion="1.9.9";
// Build a registration from an NM reporting the too-old version.
RegisterNodeManagerRequest registration=Records.newRecord(RegisterNodeManagerRequest.class);
registration.setResource(BuilderUtils.newResource(1024,1));
registration.setNodeId(NodeId.newInstance("host2",1234));
registration.setHttpPort(1234);
registration.setNMVersion(nmVersion);
RegisterNodeManagerResponse registrationResponse=rm.getResourceTrackerService().registerNodeManager(registration);
Assert.assertEquals(NodeAction.SHUTDOWN,registrationResponse.getNodeAction());
Assert.assertTrue("Diagnostic message did not contain: 'Disallowed NodeManager " + "Version " + nmVersion + ", is less than the minimum version'",registrationResponse.getDiagnosticsMessage().contains("Disallowed NodeManager Version " + nmVersion + ", is less than the minimum version "));
}
InternalCallVerifierEqualityVerifier
/**
 * Exercises the three ResourceWeights constructors plus setWeight:
 * defaults are 0, the single-arg form applies one weight to every
 * resource, and the two-arg form is (memory, cpu).
 */
@Test(timeout=3000) public void testWeights(){
// No-arg constructor: every weight defaults to zero.
ResourceWeights defaultWeights=new ResourceWeights();
Assert.assertEquals("Default CPU weight should be 0.0f.",0.0f,defaultWeights.getWeight(ResourceType.CPU),0.00001f);
Assert.assertEquals("Default memory weight should be 0.0f",0.0f,defaultWeights.getWeight(ResourceType.MEMORY),0.00001f);
// Single-arg constructor: the same weight for all resource types.
ResourceWeights uniformWeights=new ResourceWeights(2.0f);
Assert.assertEquals("The CPU weight should be 2.0f.",2.0f,uniformWeights.getWeight(ResourceType.CPU),0.00001f);
Assert.assertEquals("The memory weight should be 2.0f",2.0f,uniformWeights.getWeight(ResourceType.MEMORY),0.00001f);
// Two-arg constructor takes (memory, cpu) in that order.
ResourceWeights mixedWeights=new ResourceWeights(1.5f,2.0f);
Assert.assertEquals("The CPU weight should be 2.0f",2.0f,mixedWeights.getWeight(ResourceType.CPU),0.00001f);
Assert.assertEquals("The memory weight should be 1.5f",1.5f,mixedWeights.getWeight(ResourceType.MEMORY),0.00001f);
// setWeight overrides individual entries in place.
mixedWeights.setWeight(ResourceType.CPU,2.5f);
Assert.assertEquals("The CPU weight should be set to 2.5f.",2.5f,mixedWeights.getWeight(ResourceType.CPU),0.00001f);
mixedWeights.setWeight(ResourceType.MEMORY,4.0f);
Assert.assertEquals("The memory weight should be set to 4.0f.",4.0f,mixedWeights.getWeight(ResourceType.MEMORY),0.00001f);
}
InternalCallVerifierEqualityVerifier
/**
 * An unmanaged AM that registers while the attempt is still SUBMITTED
 * (i.e. before LAUNCHED) must drive the attempt to FAILED.
 */
@Test public void testUnmanagedAMUnexpectedRegistration(){
// Mark the attempt as unmanaged before submitting it.
unmanagedAM=true;
when(submissionContext.getUnmanagedAM()).thenReturn(true);
submitApplicationAttempt();
assertEquals(RMAppAttemptState.SUBMITTED,applicationAttempt.getAppAttemptState());
// Fire a registration that arrives too early for an unmanaged AM.
RMAppAttemptRegistrationEvent prematureRegistration=new RMAppAttemptRegistrationEvent(applicationAttempt.getAppAttemptId(),"host",8042,"oldtrackingurl");
applicationAttempt.handle(prematureRegistration);
assertEquals(YarnApplicationAttemptState.SUBMITTED,applicationAttempt.createApplicationAttemptState());
testAppAttemptSubmittedToFailedState("Unmanaged AM must register after AM attempt reaches LAUNCHED state.");
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * Verifies that queue1's maxAMShare (0.2 of the 20480 MB node, i.e.
 * 4096 MB) caps aggregate AM resource usage: AMs that would push usage
 * past the cap stay pending until earlier AMs finish and free headroom.
 */
@Test public void testQueueMaxAMShare() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
// NOTE(review): the XML tags below had been stripped (the file contained
// six out.println("") lines with only "0.2" surviving). Restored from
// upstream so the maxAMShare=0.2 limit this test exercises actually
// takes effect -- TODO confirm against the project's history.
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<queue name=\"queue1\">");
out.println("<maxAMShare>0.2</maxAMShare>");
out.println("</queue>");
out.println("</allocations>");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// One 20 GB / 20-vcore node; 20% AM share => 4096 MB of AM headroom.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(20480,20),0,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.update();
FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true);
assertEquals("Queue queue1's fair share should be 0",0,queue1.getFairShare().getMemory());
createSchedulingRequest(1 * 1024,"root.default","user1");
scheduler.update();
scheduler.handle(updateEvent);
Resource amResource1=Resource.newInstance(1024,1);
Resource amResource2=Resource.newInstance(2048,2);
Resource amResource3=Resource.newInstance(1860,2);
int amPriority=RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
// App 1 (1024 MB AM): fits under the cap, AM starts.
ApplicationAttemptId attId1=createAppAttemptId(1,1);
createApplicationWithAMResource(attId1,"queue1","user1",amResource1);
createSchedulingRequestExistingApplication(1024,1,amPriority,attId1);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory",1024,app1.getAMResource().getMemory());
assertEquals("Application1's AM should be running",1,app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",1024,queue1.getAmResourceUsage().getMemory());
// App 2 (1024 MB AM): usage reaches 2048 MB, still under the cap.
ApplicationAttemptId attId2=createAppAttemptId(2,1);
createApplicationWithAMResource(attId2,"queue1","user1",amResource1);
createSchedulingRequestExistingApplication(1024,1,amPriority,attId2);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 1024 MB memory",1024,app2.getAMResource().getMemory());
assertEquals("Application2's AM should be running",1,app2.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App 3 (1024 MB AM): would exceed the share, so it must stay pending.
ApplicationAttemptId attId3=createAppAttemptId(3,1);
createApplicationWithAMResource(attId3,"queue1","user1",amResource1);
createSchedulingRequestExistingApplication(1024,1,amPriority,attId3);
FSAppAttempt app3=scheduler.getSchedulerApp(attId3);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application3's AM requests 1024 MB memory",1024,app3.getAMResource().getMemory());
assertEquals("Application3's AM should not be running",0,app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// A non-AM container for app1 does not count against AM usage.
createSchedulingRequestExistingApplication(1024,1,attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1 should have two running containers",2,app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App1 finishes: its AM headroom is released and app3's AM starts.
AppAttemptRemovedSchedulerEvent appRemovedEvent1=new AppAttemptRemovedSchedulerEvent(attId1,RMAppAttemptState.FINISHED,false);
scheduler.update();
scheduler.handle(appRemovedEvent1);
scheduler.handle(updateEvent);
assertEquals("Application1's AM should be finished",0,app1.getLiveContainers().size());
assertEquals("Application3's AM should be running",1,app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App 4 (2048 MB AM): would push usage to 4096+; stays pending.
ApplicationAttemptId attId4=createAppAttemptId(4,1);
createApplicationWithAMResource(attId4,"queue1","user1",amResource2);
createSchedulingRequestExistingApplication(2048,2,amPriority,attId4);
FSAppAttempt app4=scheduler.getSchedulerApp(attId4);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application4's AM requests 2048 MB memory",2048,app4.getAMResource().getMemory());
assertEquals("Application4's AM should not be running",0,app4.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App 5 (2048 MB AM): also pending behind app4.
ApplicationAttemptId attId5=createAppAttemptId(5,1);
createApplicationWithAMResource(attId5,"queue1","user1",amResource2);
createSchedulingRequestExistingApplication(2048,2,amPriority,attId5);
FSAppAttempt app5=scheduler.getSchedulerApp(attId5);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application5's AM requests 2048 MB memory",2048,app5.getAMResource().getMemory());
assertEquals("Application5's AM should not be running",0,app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// Killing pending app4 frees nothing (its AM never ran), so app5 waits.
AppAttemptRemovedSchedulerEvent appRemovedEvent4=new AppAttemptRemovedSchedulerEvent(attId4,RMAppAttemptState.KILLED,false);
scheduler.handle(appRemovedEvent4);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application5's AM should not be running",0,app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// Apps 2 and 3 finish: 2048 MB released, app5's 2048 MB AM can start.
AppAttemptRemovedSchedulerEvent appRemovedEvent2=new AppAttemptRemovedSchedulerEvent(attId2,RMAppAttemptState.FINISHED,false);
AppAttemptRemovedSchedulerEvent appRemovedEvent3=new AppAttemptRemovedSchedulerEvent(attId3,RMAppAttemptState.FINISHED,false);
scheduler.handle(appRemovedEvent2);
scheduler.handle(appRemovedEvent3);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM should be finished",0,app2.getLiveContainers().size());
assertEquals("Application3's AM should be finished",0,app3.getLiveContainers().size());
assertEquals("Application5's AM should be running",1,app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App 6 asks 1860 MB but its AM resource is normalized up to 2048 MB,
// which would exceed the cap, so it stays pending.
ApplicationAttemptId attId6=createAppAttemptId(6,1);
createApplicationWithAMResource(attId6,"queue1","user1",amResource3);
createSchedulingRequestExistingApplication(1860,2,amPriority,attId6);
FSAppAttempt app6=scheduler.getSchedulerApp(attId6);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application6's AM should not be running",0,app6.getLiveContainers().size());
assertEquals("Application6's AM requests 2048 MB memory",2048,app6.getAMResource().getMemory());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// When everything finishes, AM usage returns to zero.
AppAttemptRemovedSchedulerEvent appRemovedEvent5=new AppAttemptRemovedSchedulerEvent(attId5,RMAppAttemptState.FINISHED,false);
AppAttemptRemovedSchedulerEvent appRemovedEvent6=new AppAttemptRemovedSchedulerEvent(attId6,RMAppAttemptState.FINISHED,false);
scheduler.handle(appRemovedEvent5);
scheduler.handle(appRemovedEvent6);
scheduler.update();
assertEquals("Queue1's AM resource usage should be 0",0,queue1.getAmResourceUsage().getMemory());
}
APIUtilityVerifierIterativeVerifierBranchVerifierInternalCallVerifierEqualityVerifier
/**
 * Test to verify the behavior of{@link FSQueue#assignContainer(FSSchedulerNode)})
 * Create two queues under root (fifoQueue and fairParent), and two queues
 * under fairParent (fairChild1 and fairChild2). Submit two apps to the
 * fifoQueue and one each to the fairChild* queues, all apps requiring 4
 * containers each of the total 16 container capacity
 * Assert the number of containers for each app after 4, 8, 12 and 16 updates.
 * @throws Exception
 */
@Test(timeout=5000) public void testAssignContainer() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
final String user="user1";
final String fifoQueue="fifo";
final String fairParent="fairParent";
final String fairChild1=fairParent + ".fairChild1";
final String fairChild2=fairParent + ".fairChild2";
// Two 8 GB / 8-core nodes => capacity for 16 x 1024 MB containers total.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),1,"127.0.0.1");
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent1);
scheduler.handle(nodeEvent2);
// Apps 1 and 4 share the fifo queue; apps 2 and 3 get a fair child each.
ApplicationAttemptId attId1=createSchedulingRequest(1024,fifoQueue,user,4);
ApplicationAttemptId attId2=createSchedulingRequest(1024,fairChild1,user,4);
ApplicationAttemptId attId3=createSchedulingRequest(1024,fairChild2,user,4);
ApplicationAttemptId attId4=createSchedulingRequest(1024,fifoQueue,user,4);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
FSAppAttempt app3=scheduler.getSchedulerApp(attId3);
FSAppAttempt app4=scheduler.getSchedulerApp(attId4);
// The fifo queue serves app1 to completion before app4 gets anything.
scheduler.getQueueManager().getLeafQueue(fifoQueue,true).setPolicy(SchedulingPolicy.parse("fifo"));
scheduler.update();
NodeUpdateSchedulerEvent updateEvent1=new NodeUpdateSchedulerEvent(node1);
NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2);
// Each iteration heartbeats both nodes, so (i + 1) counts pairs of
// updates; assertions fire every second iteration (after 4, 8, 12, 16
// node updates overall).
for (int i=0; i < 8; i++) {
scheduler.handle(updateEvent1);
scheduler.handle(updateEvent2);
if ((i + 1) % 2 == 0) {
String ERR="Wrong number of assigned containers after " + (i + 1) + " updates";
if (i < 4) {
// Fifo phase 1: app1 gains one container per iteration, app4 starves.
assertEquals(ERR,(i + 1),app1.getLiveContainers().size());
assertEquals(ERR,0,app4.getLiveContainers().size());
}
else {
// Fifo phase 2: app1 is satisfied at 4; app4 now accumulates.
assertEquals(ERR,4,app1.getLiveContainers().size());
assertEquals(ERR,(i - 3),app4.getLiveContainers().size());
}
// Fair children split their share evenly: one container per 2 iterations.
assertEquals(ERR,(i + 1) / 2,app2.getLiveContainers().size());
assertEquals(ERR,(i + 1) / 2,app3.getLiveContainers().size());
}
}
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Verifies the apps REST endpoint's "states" filter: a single-state query
 * returns only matching apps, and a repeated "states" parameter acts as a
 * union (ACCEPTED + KILLED returns both apps in either order).
 */
@Test public void testAppsQueryStates() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
// One app left ACCEPTED, one killed -- two distinct states to filter on.
rm.submitApp(CONTAINER_MB);
RMApp killedApp=rm.submitApp(CONTAINER_MB);
rm.killApp(killedApp.getApplicationId());
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
// Query 1: states=ACCEPTED must match exactly the first app.
MultivaluedMapImpl params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("state not equal to ACCEPTED","ACCEPTED",array.getJSONObject(0).getString("state"));
// Query 2: states=ACCEPTED&states=KILLED must return both apps.
r=resource();
params=new MultivaluedMapImpl();
params.add("states",YarnApplicationState.ACCEPTED.toString());
params.add("states",YarnApplicationState.KILLED.toString());
response=r.path("ws").path("v1").path("cluster").path("apps").queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
// The response order is unspecified, so accept either permutation.
assertTrue("both app states of ACCEPTED and KILLED are not present",(array.getJSONObject(0).getString("state").equals("ACCEPTED") && array.getJSONObject(1).getString("state").equals("KILLED")) || (array.getJSONObject(0).getString("state").equals("KILLED") && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
rm.stop();
}
InternalCallVerifierEqualityVerifier
/**
 * Filtering by a state no app is in (RUNNING, while the only app is still
 * ACCEPTED) must yield a null "apps" element rather than an empty list.
 */
@Test public void testAppsQueryStatesNone() throws JSONException, Exception {
rm.start();
MockNM nodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB);
nodeManager.nodeHeartbeat(true);
ClientResponse queryResponse=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("states",YarnApplicationState.RUNNING.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,queryResponse.getType());
JSONObject body=queryResponse.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,body.length());
assertEquals("apps is not null",JSONObject.NULL,body.get("apps"));
rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * A startedTimeEnd cutoff taken before any submission must exclude every
 * app, producing a null "apps" element.
 */
@Test public void testAppsQueryStartEnd() throws JSONException, Exception {
rm.start();
rm.registerNode("127.0.0.1:1234",2048);
// Capture a timestamp strictly before the apps are submitted.
long cutoff=System.currentTimeMillis();
Thread.sleep(1);
for (int i=0; i < 3; i++) {
rm.submitApp(CONTAINER_MB);
}
ClientResponse queryResponse=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("startedTimeEnd",String.valueOf(cutoff)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,queryResponse.getType());
JSONObject body=queryResponse.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,body.length());
assertEquals("apps is not null",JSONObject.NULL,body.get("apps"));
rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * A startedTimeBegin cutoff taken before any submission must include all
 * three submitted apps.
 */
@Test public void testAppsQueryStartBegin() throws JSONException, Exception {
rm.start();
// Capture a timestamp strictly before the apps are submitted.
long cutoff=System.currentTimeMillis();
Thread.sleep(1);
rm.registerNode("127.0.0.1:1234",2048);
for (int i=0; i < 3; i++) {
rm.submitApp(CONTAINER_MB);
}
ClientResponse queryResponse=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("startedTimeBegin",String.valueOf(cutoff)).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,queryResponse.getType());
JSONObject body=queryResponse.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,body.length());
JSONObject appsObj=body.getJSONObject("apps");
assertEquals("incorrect number of elements",1,appsObj.length());
JSONArray appArray=appsObj.getJSONArray("app");
assertEquals("incorrect number of elements",3,appArray.length());
rm.stop();
}
InternalCallVerifierEqualityVerifier
/**
 * A limit=2 query against three submitted apps must return exactly two.
 */
@Test public void testAppsQueryLimit() throws JSONException, Exception {
rm.start();
rm.registerNode("127.0.0.1:1234",2048);
for (int i=0; i < 3; i++) {
rm.submitApp(CONTAINER_MB);
}
ClientResponse queryResponse=resource().path("ws").path("v1").path("cluster").path("apps").queryParam("limit","2").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,queryResponse.getType());
JSONObject body=queryResponse.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,body.length());
JSONObject appsObj=body.getJSONObject("apps");
assertEquals("incorrect number of elements",1,appsObj.length());
JSONArray appArray=appsObj.getJSONArray("app");
assertEquals("incorrect number of elements",2,appArray.length());
rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * The XML rendering of the apps endpoint must wrap two submitted apps in
 * a single &lt;apps&gt; element containing two &lt;app&gt; children.
 */
@Test public void testAppsXMLMulti() throws JSONException, Exception {
rm.start();
MockNM nodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB,"testwordcount","user1");
rm.submitApp(2048,"testwordcount2","user1");
nodeManager.nodeHeartbeat(true);
ClientResponse xmlResponse=resource().path("ws").path("v1").path("cluster").path("apps").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,xmlResponse.getType());
// Parse the payload into a DOM so element counts can be checked.
String payload=xmlResponse.getEntity(String.class);
DocumentBuilder documentBuilder=DocumentBuilderFactory.newInstance().newDocumentBuilder();
InputSource payloadSource=new InputSource();
payloadSource.setCharacterStream(new StringReader(payload));
Document document=documentBuilder.parse(payloadSource);
NodeList appsElements=document.getElementsByTagName("apps");
assertEquals("incorrect number of elements",1,appsElements.getLength());
NodeList appElements=document.getElementsByTagName("app");
assertEquals("incorrect number of elements",2,appElements.getLength());
rm.stop();
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
/**
 * Fetching an application id that does not exist must produce a 404 with
 * a three-field RemoteException body (message, exception, javaClassName).
 */
@Test public void testNonexistApp() throws JSONException, Exception {
rm.start();
MockNM nodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB,"testwordcount","user1");
nodeManager.nodeHeartbeat(true);
try {
// This id is syntactically valid but was never submitted.
resource().path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid appid");
}
catch ( UniformInterfaceException ue) {
ClientResponse errorResponse=ue.getResponse();
assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,errorResponse.getType());
JSONObject errorBody=errorResponse.getEntity(JSONObject.class);
JSONObject remoteException=errorBody.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,remoteException.length());
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id: application_00000_0099 not found",remoteException.getString("message"));
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",remoteException.getString("exception"));
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",remoteException.getString("javaClassName"));
}
finally {
rm.stop();
}
}
InternalCallVerifierEqualityVerifier
/**
 * Verifies that filtering apps with finalStatus=UNDEFINED returns the one
 * submitted (still-running) application.
 * Fix: removed a leftover debug System.out.println of the response body.
 */
@Test public void testAppsQueryFinalStatus() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
RMApp app1=rm.submitApp(CONTAINER_MB);
// Heartbeat so the app's AM container gets allocated and the app is live.
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("finalStatus",FinalApplicationStatus.UNDEFINED.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
verifyAppInfo(array.getJSONObject(0),app1);
rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppsQueryFinishBegin() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  long start = System.currentTimeMillis();
  Thread.sleep(1);
  // Drive the first application all the way to completion so it gets a
  // finish time later than 'start'.
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1,
      ContainerState.COMPLETE);
  // These two never finish, so the finishedTimeBegin filter must skip them.
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finishedTimeBegin", String.valueOf(start))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  assertEquals("incorrect number of elements", 1,
      appsObj.getJSONArray("app").length());
  rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppsQueryFinishEnd() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  // Finish the first app, then submit two more that stay running.
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1,
      ContainerState.COMPLETE);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long end = System.currentTimeMillis();
  // finishedTimeEnd alone matches all three apps (including the ones that
  // have not finished yet) — mirrors the original expectation of 3.
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finishedTimeEnd", String.valueOf(end))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  assertEquals("incorrect number of elements", 3,
      appsObj.getJSONArray("app").length());
  rm.stop();
}
InternalCallVerifierEqualityVerifier
@Test public void testAppsQueryUser() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  // Both apps belong to the current user, so filtering on that user must
  // return both of them.
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("user", user)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  assertEquals("incorrect number of elements", 2,
      appsObj.getJSONArray("app").length());
  rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppsQueryStartBeginEnd() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234", 2048);
  long start = System.currentTimeMillis();
  Thread.sleep(1);
  // Two apps start inside the [start, end] window...
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long end = System.currentTimeMillis();
  Thread.sleep(1);
  // ...and one after it, which the combined filter must exclude.
  rm.submitApp(CONTAINER_MB);
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("startedTimeBegin", String.valueOf(start))
      .queryParam("startedTimeEnd", String.valueOf(end))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  assertEquals("incorrect number of elements", 2,
      appsObj.getJSONArray("app").length());
  rm.stop();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
/**
 * Checks filtering of the apps resource by the repeatable, comma-separated
 * "applicationTypes" query parameter, including whitespace and empty-token
 * handling. Refactored: the ten near-identical request/verify sequences are
 * factored into {@link #queryAppsByTypes} and {@link #assertAppTypesUnordered};
 * the requests issued and values asserted are unchanged.
 */
@Test public void testAppsQueryAppTypes() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
Thread.sleep(1);
// First app (default application type) is driven to completion.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
// Two more apps with explicit application types.
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"NON-YARN");
// Single exact type.
JSONArray array=queryAppsByTypes("MAPREDUCE");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("MAPREDUCE",array.getJSONObject(0).getString("applicationType"));
// Repeated parameter: union of both types.
array=queryAppsByTypes("YARN","MAPREDUCE");
assertEquals("incorrect number of elements",2,array.length());
assertAppTypesUnordered(array,"YARN","MAPREDUCE");
// Comma-separated list inside a single parameter value.
array=queryAppsByTypes("YARN,NON-YARN");
assertEquals("incorrect number of elements",2,array.length());
assertAppTypesUnordered(array,"YARN","NON-YARN");
// Empty value: no filtering, all three apps returned.
array=queryAppsByTypes("");
assertEquals("incorrect number of elements",3,array.length());
// Comma list plus a repeated parameter covers all three types.
array=queryAppsByTypes("YARN,NON-YARN","MAPREDUCE");
assertEquals("incorrect number of elements",3,array.length());
// An empty token alongside a real one keeps only the real filter.
array=queryAppsByTypes("YARN","");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
// Whitespace and empty tokens around a single type are ignored.
array=queryAppsByTypes(",,, ,, YARN ,, ,");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
// Only empty tokens: treated as "no filter".
array=queryAppsByTypes(",,, ,, ,, ,");
assertEquals("incorrect number of elements",3,array.length());
// Mixed real and empty tokens in one value.
array=queryAppsByTypes("YARN, ,NON-YARN, ,,");
assertEquals("incorrect number of elements",2,array.length());
assertAppTypesUnordered(array,"YARN","NON-YARN");
// Mixed tokens spread across two repeated parameters.
array=queryAppsByTypes(" YARN, , ,,,","MAPREDUCE , ,, ,");
assertEquals("incorrect number of elements",2,array.length());
assertAppTypesUnordered(array,"YARN","MAPREDUCE");
rm.stop();
}
/**
 * Issues GET /ws/v1/cluster/apps with one "applicationTypes" query parameter
 * per supplied value and returns the resulting "app" JSON array after
 * validating the JSON envelope structure.
 */
private JSONArray queryAppsByTypes(String... applicationTypes) throws JSONException {
WebResource r=resource().path("ws").path("v1").path("cluster").path("apps");
for ( String type : applicationTypes) {
r=r.queryParam("applicationTypes",type);
}
ClientResponse response=r.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
return apps.getJSONArray("app");
}
/** Asserts a two-element app array contains exactly the two given types, in either order. */
private void assertAppTypesUnordered(JSONArray array,String typeA,String typeB) throws JSONException {
String first=array.getJSONObject(0).getString("applicationType");
String second=array.getJSONObject(1).getString("applicationType");
assertTrue((first.equals(typeA) && second.equals(typeB)) || (first.equals(typeB) && second.equals(typeA)));
}
InternalCallVerifierEqualityVerifier
@Test public void testAppsQueryState() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  // Filter on state=ACCEPTED; the single submitted app must match and its
  // serialized fields must round-trip correctly.
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("state", YarnApplicationState.ACCEPTED.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appList = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 1, appList.length());
  verifyAppInfo(appList.getJSONObject(0), app1);
  rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppsQueryFinishBeginEnd() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  long start = System.currentTimeMillis();
  Thread.sleep(1);
  // Only the first app is completed, so only it has a finish time inside
  // the [start, end] window.
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1,
      ContainerState.COMPLETE);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long end = System.currentTimeMillis();
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finishedTimeBegin", String.valueOf(start))
      .queryParam("finishedTimeEnd", String.valueOf(end))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  assertEquals("incorrect number of elements", 1,
      appsObj.getJSONArray("app").length());
  rm.stop();
}
UtilityVerifierInternalCallVerifierEqualityVerifierHybridVerifier
// NOTE(review): despite its name, this test is byte-for-byte identical to
// testNonexistApp and never requests the "appattempts" sub-resource;
// presumably the request was meant to end in .path("appattempts") — confirm
// against the RM REST API and update the path if so.
@Test public void testNonexistAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB,"testwordcount","user1");
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
// Request an application id that was never submitted.
r.path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid appid");
}
catch ( UniformInterfaceException ue) {
// The 404 is delivered as a UniformInterfaceException whose response body
// carries a RemoteException JSON object with exactly three fields.
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id: application_00000_0099 not found",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
finally {
// Always stop the RM so later tests start from a clean state.
rm.stop();
}
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppAttemptsXML() throws JSONException, Exception {
  rm.start();
  String user = "user1";
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", user);
  nm.nodeHeartbeat(true);
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .path(app1.getApplicationId().toString()).path("appattempts")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  // Parse the XML payload and make sure exactly one attempt is reported.
  DocumentBuilder builder =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source =
      new InputSource(new StringReader(response.getEntity(String.class)));
  Document dom = builder.parse(source);
  assertEquals("incorrect number of elements", 1,
      dom.getElementsByTagName("appAttempts").getLength());
  NodeList attempt = dom.getElementsByTagName("appAttempt");
  assertEquals("incorrect number of elements", 1, attempt.getLength());
  verifyAppAttemptsXML(attempt, app1.getCurrentAppAttempt(), user);
  rm.stop();
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
// Submits an app whose AM container is failed repeatedly until the RM's
// configured max attempt count is exhausted, then checks that every attempt
// is reported by the appattempts REST resource.
@Test(timeout=20000) public void testMultipleAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",8192);
RMApp app1=rm.submitApp(CONTAINER_MB,"testwordcount","user1");
MockAM am=MockRM.launchAndRegisterAM(app1,rm,amNodeManager);
int maxAppAttempts=rm.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
// The retry scenario only makes sense when more than one attempt is allowed.
assertTrue(maxAppAttempts > 1);
int numAttempt=1;
while (true) {
// Fail the current AM container and wait for the attempt to go FAILED.
amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FAILED);
if (numAttempt == maxAppAttempts) {
// Last allowed attempt failed: the whole application must fail.
rm.waitForState(app1.getApplicationId(),RMAppState.FAILED);
break;
}
// Otherwise the RM re-accepts the app and a fresh attempt is launched.
rm.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
am=MockRM.launchAndRegisterAM(app1,rm,amNodeManager);
numAttempt++;
}
assertEquals("incorrect number of attempts",maxAppAttempts,app1.getAppAttempts().values().size());
testAppAttemptsHelper(app1.getApplicationId().toString(),app1,MediaType.APPLICATION_JSON);
rm.stop();
}
APIUtilityVerifierInternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testAppsQueryStatesComma() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  RMApp killedApp = rm.submitApp(CONTAINER_MB);
  rm.killApp(killedApp.getApplicationId());
  nm.nodeHeartbeat(true);
  // Case 1: a single state in the "states" filter matches only the
  // still-accepted application.
  MultivaluedMapImpl params = new MultivaluedMapImpl();
  params.add("states", YarnApplicationState.ACCEPTED.toString());
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParams(params)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appList = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 1, appList.length());
  assertEquals("state not equal to ACCEPTED", "ACCEPTED",
      appList.getJSONObject(0).getString("state"));
  // Case 2: two states joined by a comma match both applications.
  params = new MultivaluedMapImpl();
  params.add("states", YarnApplicationState.ACCEPTED.toString() + ","
      + YarnApplicationState.KILLED.toString());
  response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParams(params)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  appList = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 2, appList.length());
  String firstState = appList.getJSONObject(0).getString("state");
  String secondState = appList.getJSONObject(1).getString("state");
  assertTrue("both app states of ACCEPTED and KILLED are not present",
      (firstState.equals("ACCEPTED") && secondState.equals("KILLED"))
          || (firstState.equals("KILLED") && secondState.equals("ACCEPTED")));
  rm.stop();
}
InternalCallVerifierEqualityVerifier
@Test public void testAppsQueryFinalStatusNone() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  // No app has finalStatus KILLED, so "apps" must come back as JSON null.
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finalStatus", FinalApplicationStatus.KILLED.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  assertEquals("apps is not null", JSONObject.NULL, body.get("apps"));
  rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppsXML() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
  nm.nodeHeartbeat(true);
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  // Parse the XML body and check the single submitted app is present.
  DocumentBuilder builder =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source =
      new InputSource(new StringReader(response.getEntity(String.class)));
  Document dom = builder.parse(source);
  assertEquals("incorrect number of elements", 1,
      dom.getElementsByTagName("apps").getLength());
  NodeList nodes = dom.getElementsByTagName("app");
  assertEquals("incorrect number of elements", 1, nodes.getLength());
  verifyAppsXML(nodes, app1);
  rm.stop();
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
@Test public void testAppsQueryStartBeginSome() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234", 2048);
  // Two apps start before the cutoff, one after; only the latter matches.
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long start = System.currentTimeMillis();
  Thread.sleep(1);
  rm.submitApp(CONTAINER_MB);
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("apps")
      .queryParam("startedTimeBegin", String.valueOf(start))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject body = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONObject appsObj = body.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  assertEquals("incorrect number of elements", 1,
      appsObj.getJSONArray("app").length());
  rm.stop();
}
APIUtilityVerifierEqualityVerifier
@Test public void testGetProxyUriNull() throws Exception {
  // With no original URI the proxy URI is just the proxy root plus the
  // application's proxy path.
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  URI proxy = new URI("http://proxy.net:8080/");
  URI expected = new URI("http://proxy.net:8080/proxy/application_6384623_0005/");
  assertEquals(expected, ProxyUriUtils.getProxyUri(null, proxy, appId));
}
EqualityVerifier
@Test public void testGetPathAndQuery(){
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  // Query string is passed through untouched when the approval flag is off...
  assertEquals("/proxy/application_6384623_0005/static/app?foo=bar",
      ProxyUriUtils.getPathAndQuery(appId, "/static/app", "?foo=bar", false));
  // ...and the proxyapproved marker is appended when it is on.
  assertEquals(
      "/proxy/application_6384623_0005/static/app?foo=bar&bad=good&proxyapproved=true",
      ProxyUriUtils.getPathAndQuery(appId, "/static/app", "foo=bar&bad=good", true));
}
EqualityVerifier
@Test public void testGetPathApplicationId(){
  ApplicationId small = BuilderUtils.newApplicationId(100l, 1);
  ApplicationId large = BuilderUtils.newApplicationId(6384623l, 5);
  // The sequence number is rendered zero-padded to four digits.
  assertEquals("/proxy/application_100_0001", ProxyUriUtils.getPath(small));
  assertEquals("/proxy/application_6384623_0005", ProxyUriUtils.getPath(large));
}
EqualityVerifier
@Test public void testGetPathApplicationIdString(){
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  // A null trailing path yields just the application prefix; otherwise the
  // given path is appended, with or without its own leading slash.
  assertEquals("/proxy/application_6384623_0005",
      ProxyUriUtils.getPath(appId, null));
  assertEquals("/proxy/application_6384623_0005/static/app",
      ProxyUriUtils.getPath(appId, "/static/app"));
  assertEquals("/proxy/application_6384623_0005/",
      ProxyUriUtils.getPath(appId, "/"));
  assertEquals("/proxy/application_6384623_0005/some/path",
      ProxyUriUtils.getPath(appId, "some/path"));
}
APIUtilityVerifierEqualityVerifier
@Test public void testGetProxyUri() throws Exception {
  // The original URI's path and query are re-rooted under the proxy's
  // per-application prefix.
  ApplicationId appId = BuilderUtils.newApplicationId(6384623l, 5);
  URI original = new URI("http://host.com/static/foo?bar=bar");
  URI proxy = new URI("http://proxy.net:8080/");
  URI expected =
      new URI("http://proxy.net:8080/proxy/application_6384623_0005/static/foo?bar=bar");
  assertEquals(expected, ProxyUriUtils.getProxyUri(original, proxy, appId));
}
APIUtilityVerifierEqualityVerifier
@Test public void testBindAddress(){
  // With an unmodified configuration, the proxy binds to the default port.
  InetSocketAddress addr =
      WebAppProxyServer.getBindAddress(new YarnConfiguration());
  Assert.assertEquals("Web Proxy default bind address port is incorrect",
      YarnConfiguration.DEFAULT_PROXY_PORT, addr.getPort());
}
BranchVerifierInternalCallVerifierEqualityVerifier
/**
 * Starting the composite service must transition it INITED -> STARTED and
 * leave the embedded WebAppProxy bound to the configured address.
 * Fix: assertEquals takes (expected, actual) — the bind-address check had
 * the arguments reversed, which would produce a misleading failure message.
 */
@Test public void testStart(){
assertEquals(STATE.INITED,webAppProxy.getServiceState());
webAppProxy.start();
for ( Service service : webAppProxy.getServices()) {
if (service instanceof WebAppProxy) {
assertEquals(proxyAddress,((WebAppProxy)service).getBindAddress());
}
}
assertEquals(STATE.STARTED,webAppProxy.getServiceState());
}
InternalCallVerifierBooleanVerifierEqualityVerifierHybridVerifier
@Test public void testNMSimulator() throws Exception {
  // Register one simulated NM and let a heartbeat reach the RM.
  NMSimulator node = new NMSimulator();
  node.init("rack1/node1", GB * 10, 10, 0, 1000, rm);
  node.middleStep();
  Assert.assertEquals(1, rm.getResourceScheduler().getNumClusterNodes());
  Assert.assertEquals(GB * 10,
      rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB());
  Assert.assertEquals(10,
      rm.getResourceScheduler().getRootQueueMetrics().getAvailableVirtualCores());
  // A container added with a positive lifetime is tracked as running.
  ContainerId runningId = newContainerId(1, 1, 1);
  node.addNewContainer(
      Container.newInstance(runningId, null, null,
          Resources.createResource(GB, 1), null, null), 100000l);
  Assert.assertTrue("Node1 should have one running container.",
      node.getRunningContainers().containsKey(runningId));
  // A container added with lifetime -1 is tracked as the AM container.
  ContainerId amId = newContainerId(2, 1, 1);
  node.addNewContainer(
      Container.newInstance(amId, null, null,
          Resources.createResource(GB, 1), null, null), -1l);
  Assert.assertTrue("Node1 should have one running AM container",
      node.getAMContainers().contains(amId));
  // Cleanup moves the running container to completed and drops the AM one.
  node.cleanupContainer(runningId);
  Assert.assertTrue("Container1 should be removed from Node1.",
      node.getCompletedContainers().contains(runningId));
  node.cleanupContainer(amId);
  Assert.assertFalse("Container2 should be removed from Node1.",
      node.getAMContainers().contains(amId));
}
APIUtilityVerifierInternalCallVerifierEqualityVerifier
/**
 * A basic test that creates a few process directories and writes stat files.
 * Verifies that the cpu time and memory is correctly computed.
 * @throws IOException if there was a problem setting up the fake procfs
 * directories or files.
 */
@Test(timeout=30000) public void testCpuAndMemoryForProcessTree() throws IOException {
String[] pids={"100","200","300","400"};
File procfsRootDir=new File(TEST_ROOT_DIR,"proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir,pids);
// Fake stat entries: pids 200 and 300 form a chain under 100 (ppid fields
// "100" and "200"); pid 400 has ppid 1 and is outside the tree rooted at 100.
ProcessStatInfo[] procInfos=new ProcessStatInfo[4];
procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","1000","200"});
procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","2000","400"});
procInfos[2]=new ProcessStatInfo(new String[]{"300","proc3","200","100","100","300000","300","3000","600"});
procInfos[3]=new ProcessStatInfo(new String[]{"400","proc4","1","400","400","400000","400","4000","800"});
ProcessTreeSmapMemInfo[] memInfo=new ProcessTreeSmapMemInfo[4];
memInfo[0]=new ProcessTreeSmapMemInfo("100");
memInfo[1]=new ProcessTreeSmapMemInfo("200");
memInfo[2]=new ProcessTreeSmapMemInfo("300");
memInfo[3]=new ProcessTreeSmapMemInfo("400");
createMemoryMappingInfo(memInfo);
writeStatFiles(procfsRootDir,pids,procInfos,memInfo);
Configuration conf=new Configuration();
ProcessTree=createProcessTree is rooted at pid 100, so totals below cover pids 100+200+300 only.
ProcfsBasedProcessTree processTree=createProcessTree("100",procfsRootDir.getAbsolutePath());
processTree.setConf(conf);
processTree.updateProcessTree();
// vmem: 100000+200000+300000; rss pages: 200+400+600 scaled by page size.
Assert.assertEquals("Cumulative virtual memory does not match",600000L,processTree.getCumulativeVmem());
long cumuRssMem=ProcfsBasedProcessTree.PAGE_SIZE > 0 ? 600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
Assert.assertEquals("Cumulative rss memory does not match",cumuRssMem,processTree.getCumulativeRssmem());
// cpu jiffies: (1000+200)+(2000+400)+(3000+600) = 7200 (utime+stime per proc).
long cumuCpuTime=ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
Assert.assertEquals("Cumulative cpu time does not match",cumuCpuTime,processTree.getCumulativeCpuTime());
// With smaps enabled, rss comes from the smaps mapping info instead.
setSmapsInProceTree(processTree,true);
Assert.assertEquals("Cumulative rss memory does not match",(100 * KB_TO_BYTES * 3),processTree.getCumulativeRssmem());
// Bump cpu fields for pids 100 and 200 and re-read: new total is
// (2000+300)+(3000+500)+(3000+600) = 9400 jiffies.
procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","2000","300"});
procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","3000","500"});
writeStatFiles(procfsRootDir,pids,procInfos,memInfo);
processTree.updateProcessTree();
cumuCpuTime=ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
Assert.assertEquals("Cumulative cpu time does not match",cumuCpuTime,processTree.getCumulativeCpuTime());
}
finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
EqualityVerifier
/**
 * RackResolver initialized with a script-based mapping must report the
 * configured script name in its string form.
 * Fix: assertEquals takes (expected, actual) — the arguments were reversed,
 * which would produce a misleading failure message.
 */
@Test public void testScriptName(){
Configuration conf=new Configuration();
conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,ScriptBasedMapping.class,DNSToSwitchMapping.class);
conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,"testScript");
RackResolver.init(conf);
Assert.assertEquals("script-based mapping with script testScript",RackResolver.getDnsToSwitchMapping().toString());
}
Class: org.apache.hadoop.yarn.util.TestTimes
EqualityVerifier
@Test
public void testPositiveStartandFinishTimes() {
  // With valid (non-negative) start and finish timestamps, the elapsed
  // time is simply finish - start, regardless of the running flag.
  long delta = Times.elapsed(5, 10, true);
  Assert.assertEquals("Elapsed time is not 5", 5, delta);
  delta = Times.elapsed(5, 10, false);
  Assert.assertEquals("Elapsed time is not 5", 5, delta);
}
EqualityVerifier
@Test
public void testNegativeStartandFinishTimes() {
  // Negative start and finish times together are invalid input;
  // Times.elapsed signals this by returning -1.
  long result = Times.elapsed(-5, -10, false);
  Assert.assertEquals("Elapsed time is not -1", -1, result);
}
EqualityVerifier
@Test
public void testNegativeStartTimes() {
  // A negative (i.e. unset) start time yields 0 when the task is treated
  // as still running, and -1 when it is not.
  long result = Times.elapsed(-5, 10, true);
  Assert.assertEquals("Elapsed time is not 0", 0, result);
  result = Times.elapsed(-5, 10, false);
  Assert.assertEquals("Elapsed time is not -1", -1, result);
}
EqualityVerifier
@Test
public void testFinishTimesAheadOfStartTimes() {
  // A finish time earlier than the start time is invalid regardless of
  // the running flag; Times.elapsed reports -1 for both cases.
  long result = Times.elapsed(10, 5, true);
  Assert.assertEquals("Elapsed time is not -1", -1, result);
  result = Times.elapsed(10, 5, false);
  Assert.assertEquals("Elapsed time is not -1", -1, result);
  // Extreme start time: the subtraction must not wrap around to a
  // positive value; the result should still be -1.
  result = Times.elapsed(Long.MAX_VALUE, 0, true);
  Assert.assertEquals("Elapsed time is not -1", -1, result);
}
EqualityVerifier
@Test
public void testNegativeFinishTimes() {
  // A negative finish time with isRunning=false is invalid input;
  // Times.elapsed reports it as -1.
  long result = Times.elapsed(5, -10, false);
  Assert.assertEquals("Elapsed time is not -1", -1, result);
}
InternalCallVerifierEqualityVerifier
@Test
public void testPreformatted() {
  // Inside a <pre> element Hamlet must stop pretty-printing so that the
  // preformatted whitespace is emitted verbatim; the indent counter
  // should therefore only advance for the elements outside the <pre>.
  Hamlet hamlet = newHamlet()
      .div()
        .i("inline before pre")
        .pre()
          ._("pre text1\npre text2")
          .i("inline in pre")
          ._("pre text after inline")._()
        .i("inline after pre")._();
  PrintWriter writer = hamlet.getWriter();
  writer.flush();
  assertEquals(5, hamlet.indents);
}
InternalCallVerifierEqualityVerifier
@Test
public void testScriptStyle() {
  // script() and style() should emit the proper MIME-type attributes and
  // close themselves, leaving the nesting level back at zero.
  Hamlet hamlet = newHamlet()
      .script("a.js")
      .script("b.js")
      .style("h1 { font-size: 1.2em }");
  PrintWriter writer = hamlet.getWriter();
  writer.flush();
  assertEquals(0, hamlet.nestLevel);
  // Two <script> elements, one <style> element.
  verify(writer, times(2)).print(" type=\"text/javascript\"");
  verify(writer).print(" type=\"text/css\"");
}
InternalCallVerifierEqualityVerifier
@Test public void testHamlet(){
Hamlet h=newHamlet().title("test").h1("heading 1").p("#id.class").b("hello").em("world!")._().div("#footer")._("Brought to you by").a("http://hostname/","Somebody")._();
PrintWriter out=h.getWriter();
out.flush();
assertEquals(0,h.nestLevel);
verify(out).print("");
verify(out).print("